2024-11-14 09:28:46,630 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-14 09:28:46,646 main DEBUG Took 0.013766 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-14 09:28:46,646 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-14 09:28:46,647 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-14 09:28:46,648 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-14 09:28:46,650 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:28:46,658 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-14 09:28:46,674 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:28:46,676 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:28:46,677 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:28:46,677 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:28:46,678 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:28:46,678 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:28:46,679 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:28:46,679 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:28:46,680 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:28:46,680 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:28:46,682 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:28:46,682 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:28:46,683 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:28:46,683 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-14 09:28:46,684 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:28:46,684 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:28:46,685 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:28:46,685 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:28:46,686 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:28:46,686 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:28:46,687 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:28:46,687 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:28:46,688 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:28:46,688 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:28:46,689 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:28:46,689 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-14 09:28:46,691 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:28:46,692 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-14 09:28:46,695 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-14 09:28:46,695 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-14 09:28:46,697 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-14 09:28:46,697 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-14 09:28:46,708 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-14 09:28:46,711 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-14 09:28:46,713 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-14 09:28:46,714 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-14 09:28:46,714 main DEBUG createAppenders(={Console}) 2024-11-14 09:28:46,715 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-14 09:28:46,715 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-14 09:28:46,716 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-14 09:28:46,717 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-14 09:28:46,717 main DEBUG OutputStream closed 2024-11-14 09:28:46,717 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-14 09:28:46,718 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-14 09:28:46,718 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-14 09:28:46,809 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-14 09:28:46,812 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-14 09:28:46,813 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-14 09:28:46,815 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-14 09:28:46,815 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-14 09:28:46,816 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-14 09:28:46,816 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-14 09:28:46,817 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-14 09:28:46,817 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-14 09:28:46,818 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-14 09:28:46,818 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-14 09:28:46,818 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-14 09:28:46,819 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-14 09:28:46,819 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-14 09:28:46,820 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-14 09:28:46,820 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-14 09:28:46,821 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-14 09:28:46,822 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-14 09:28:46,825 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-14 09:28:46,825 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-14 09:28:46,826 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-14 09:28:46,826 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-14T09:28:47,144 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3 2024-11-14 09:28:47,148 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-14 09:28:47,149 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-14T09:28:47,163 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-14T09:28:47,204 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=260, ProcessCount=11, AvailableMemoryMB=6968 2024-11-14T09:28:47,207 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T09:28:47,227 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/cluster_ff8efdb6-df0f-5b8c-ec0f-5bdf2b3293cd, deleteOnExit=true 2024-11-14T09:28:47,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T09:28:47,228 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/test.cache.data in system properties and HBase conf 2024-11-14T09:28:47,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T09:28:47,230 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/hadoop.log.dir in system properties and HBase conf 2024-11-14T09:28:47,230 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T09:28:47,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T09:28:47,232 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T09:28:47,333 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-14T09:28:47,452 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T09:28:47,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:28:47,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:28:47,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T09:28:47,458 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:28:47,459 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T09:28:47,459 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T09:28:47,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:28:47,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:28:47,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T09:28:47,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/nfs.dump.dir in system properties and HBase conf 2024-11-14T09:28:47,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/java.io.tmpdir in system properties and HBase conf 2024-11-14T09:28:47,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:28:47,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T09:28:47,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T09:28:48,003 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:28:48,383 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-14T09:28:48,480 INFO [Time-limited test {}] log.Log(170): Logging initialized @2665ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-14T09:28:48,571 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:28:48,642 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:28:48,669 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:28:48,670 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:28:48,672 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:28:48,689 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:28:48,692 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c5202f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:28:48,693 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ae7f863{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:28:48,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c1a236c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/java.io.tmpdir/jetty-localhost-38623-hadoop-hdfs-3_4_1-tests_jar-_-any-11143540503173362422/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:28:48,969 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a625720{HTTP/1.1, (http/1.1)}{localhost:38623} 2024-11-14T09:28:48,969 INFO [Time-limited test {}] server.Server(415): Started @3156ms 2024-11-14T09:28:49,003 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:28:49,418 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:28:49,426 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:28:49,427 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:28:49,428 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:28:49,428 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:28:49,429 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74548cdc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:28:49,429 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3fc5598e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:28:49,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a47b0ed{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/java.io.tmpdir/jetty-localhost-38657-hadoop-hdfs-3_4_1-tests_jar-_-any-503095275253971444/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:28:49,574 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58445dea{HTTP/1.1, (http/1.1)}{localhost:38657} 2024-11-14T09:28:49,574 INFO [Time-limited test {}] server.Server(415): Started @3760ms 2024-11-14T09:28:49,647 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:28:49,796 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:28:49,806 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:28:49,808 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:28:49,808 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:28:49,808 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:28:49,809 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6176039d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:28:49,810 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b18aeba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:28:49,969 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@54b536b1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/java.io.tmpdir/jetty-localhost-37379-hadoop-hdfs-3_4_1-tests_jar-_-any-7193948262043287478/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:28:49,970 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@59ce19fe{HTTP/1.1, (http/1.1)}{localhost:37379} 2024-11-14T09:28:49,970 INFO [Time-limited test {}] server.Server(415): Started @4157ms 2024-11-14T09:28:49,973 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T09:28:50,210 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/cluster_ff8efdb6-df0f-5b8c-ec0f-5bdf2b3293cd/data/data1/current/BP-742282143-172.17.0.2-1731576528106/current, will proceed with Du for space computation calculation, 2024-11-14T09:28:50,210 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/cluster_ff8efdb6-df0f-5b8c-ec0f-5bdf2b3293cd/data/data3/current/BP-742282143-172.17.0.2-1731576528106/current, will proceed with Du for space computation calculation, 2024-11-14T09:28:50,210 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/cluster_ff8efdb6-df0f-5b8c-ec0f-5bdf2b3293cd/data/data4/current/BP-742282143-172.17.0.2-1731576528106/current, will proceed with Du for space computation calculation, 2024-11-14T09:28:50,212 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/cluster_ff8efdb6-df0f-5b8c-ec0f-5bdf2b3293cd/data/data2/current/BP-742282143-172.17.0.2-1731576528106/current, will proceed with Du for space computation calculation, 2024-11-14T09:28:50,281 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:28:50,282 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:28:50,367 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbedf345ccddd60f5 with lease ID 0x9cffd41d9af742a8: Processing first storage report for DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90 from datanode DatanodeRegistration(127.0.0.1:32977, datanodeUuid=e705dbf7-6fba-4e42-9825-9befd79346a6, infoPort=44137, infoSecurePort=0, ipcPort=38455, storageInfo=lv=-57;cid=testClusterID;nsid=1393922743;c=1731576528106) 2024-11-14T09:28:50,369 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbedf345ccddd60f5 with lease ID 0x9cffd41d9af742a8: from storage DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90 node DatanodeRegistration(127.0.0.1:32977, datanodeUuid=e705dbf7-6fba-4e42-9825-9befd79346a6, infoPort=44137, infoSecurePort=0, ipcPort=38455, storageInfo=lv=-57;cid=testClusterID;nsid=1393922743;c=1731576528106), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-14T09:28:50,370 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa40d62f7d0f2aca1 with lease ID 0x9cffd41d9af742a7: Processing first storage report for DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4 from datanode DatanodeRegistration(127.0.0.1:33135, datanodeUuid=2d66a2d2-cf87-4ad6-acc4-ad404bbab6c3, infoPort=41647, infoSecurePort=0, ipcPort=43925, storageInfo=lv=-57;cid=testClusterID;nsid=1393922743;c=1731576528106) 2024-11-14T09:28:50,370 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa40d62f7d0f2aca1 with lease ID 0x9cffd41d9af742a7: from storage DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4 node DatanodeRegistration(127.0.0.1:33135, datanodeUuid=2d66a2d2-cf87-4ad6-acc4-ad404bbab6c3, infoPort=41647, infoSecurePort=0, ipcPort=43925, storageInfo=lv=-57;cid=testClusterID;nsid=1393922743;c=1731576528106), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:28:50,370 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbedf345ccddd60f5 with lease ID 0x9cffd41d9af742a8: Processing first storage report for DS-bbfb2e47-9c0f-4209-be70-d6407e1e3a7d from datanode DatanodeRegistration(127.0.0.1:32977, datanodeUuid=e705dbf7-6fba-4e42-9825-9befd79346a6, infoPort=44137, infoSecurePort=0, ipcPort=38455, storageInfo=lv=-57;cid=testClusterID;nsid=1393922743;c=1731576528106) 2024-11-14T09:28:50,370 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbedf345ccddd60f5 with lease ID 0x9cffd41d9af742a8: from storage DS-bbfb2e47-9c0f-4209-be70-d6407e1e3a7d node DatanodeRegistration(127.0.0.1:32977, datanodeUuid=e705dbf7-6fba-4e42-9825-9befd79346a6, infoPort=44137, infoSecurePort=0, ipcPort=38455, storageInfo=lv=-57;cid=testClusterID;nsid=1393922743;c=1731576528106), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:28:50,371 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa40d62f7d0f2aca1 with lease ID 0x9cffd41d9af742a7: Processing first storage report for DS-9953e1b1-c036-4a20-9fa8-7f480b2cacab from datanode DatanodeRegistration(127.0.0.1:33135, datanodeUuid=2d66a2d2-cf87-4ad6-acc4-ad404bbab6c3, infoPort=41647, infoSecurePort=0, ipcPort=43925, storageInfo=lv=-57;cid=testClusterID;nsid=1393922743;c=1731576528106) 2024-11-14T09:28:50,371 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xa40d62f7d0f2aca1 with lease ID 0x9cffd41d9af742a7: from storage DS-9953e1b1-c036-4a20-9fa8-7f480b2cacab node DatanodeRegistration(127.0.0.1:33135, datanodeUuid=2d66a2d2-cf87-4ad6-acc4-ad404bbab6c3, infoPort=41647, infoSecurePort=0, ipcPort=43925, storageInfo=lv=-57;cid=testClusterID;nsid=1393922743;c=1731576528106), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T09:28:50,429 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3 2024-11-14T09:28:50,512 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/cluster_ff8efdb6-df0f-5b8c-ec0f-5bdf2b3293cd/zookeeper_0, clientPort=56289, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/cluster_ff8efdb6-df0f-5b8c-ec0f-5bdf2b3293cd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/cluster_ff8efdb6-df0f-5b8c-ec0f-5bdf2b3293cd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T09:28:50,522 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56289 2024-11-14T09:28:50,537 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:28:50,540 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:28:50,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:28:50,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:28:51,233 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a with version=8 2024-11-14T09:28:51,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/hbase-staging 2024-11-14T09:28:51,325 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-14T09:28:51,578 INFO [Time-limited test {}] client.ConnectionUtils(128): master/83f56b55f2af:0 server-side Connection retries=45 2024-11-14T09:28:51,589 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:28:51,589 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:28:51,594 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:28:51,594 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:28:51,595 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:28:51,735 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T09:28:51,797 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-14T09:28:51,806 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-14T09:28:51,810 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:28:51,837 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 23528 (auto-detected) 2024-11-14T09:28:51,838 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-14T09:28:51,858 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41035 2024-11-14T09:28:51,881 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41035 connecting to ZooKeeper ensemble=127.0.0.1:56289 2024-11-14T09:28:51,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:410350x0, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:28:51,919 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41035-0x10115cef0ca0000 connected 2024-11-14T09:28:51,957 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:28:51,960 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:28:51,974 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:28:51,979 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a, hbase.cluster.distributed=false 2024-11-14T09:28:52,008 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:28:52,013 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41035 2024-11-14T09:28:52,014 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41035 2024-11-14T09:28:52,014 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41035 2024-11-14T09:28:52,015 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41035 2024-11-14T09:28:52,015 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41035 2024-11-14T09:28:52,138 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/83f56b55f2af:0 server-side Connection retries=45 2024-11-14T09:28:52,140 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:28:52,140 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:28:52,141 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:28:52,141 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:28:52,141 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:28:52,144 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T09:28:52,146 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:28:52,147 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43205 2024-11-14T09:28:52,149 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43205 connecting to ZooKeeper ensemble=127.0.0.1:56289 2024-11-14T09:28:52,150 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:28:52,154 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:28:52,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:432050x0, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:28:52,164 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43205-0x10115cef0ca0001 connected 2024-11-14T09:28:52,164 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:28:52,169 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T09:28:52,177 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T09:28:52,179 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T09:28:52,184 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:28:52,184 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43205 2024-11-14T09:28:52,185 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43205 2024-11-14T09:28:52,185 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43205 2024-11-14T09:28:52,187 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43205 2024-11-14T09:28:52,190 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43205 2024-11-14T09:28:52,209 DEBUG [M:0;83f56b55f2af:41035 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;83f56b55f2af:41035 2024-11-14T09:28:52,210 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/83f56b55f2af,41035,1731576531378 2024-11-14T09:28:52,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:28:52,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:28:52,219 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/83f56b55f2af,41035,1731576531378 2024-11-14T09:28:52,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:28:52,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T09:28:52,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-14T09:28:52,243 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T09:28:52,244 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/83f56b55f2af,41035,1731576531378 from backup master directory 2024-11-14T09:28:52,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/83f56b55f2af,41035,1731576531378 2024-11-14T09:28:52,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:28:52,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:28:52,248 WARN [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:28:52,249 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=83f56b55f2af,41035,1731576531378 2024-11-14T09:28:52,252 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-14T09:28:52,253 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-14T09:28:52,313 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/hbase.id] with ID: e38f99b2-9c39-4866-af2f-9a48e3ec1daf 2024-11-14T09:28:52,313 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/.tmp/hbase.id 2024-11-14T09:28:52,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:28:52,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:28:52,327 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/.tmp/hbase.id]:[hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/hbase.id] 2024-11-14T09:28:52,375 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:28:52,380 INFO 
[master/83f56b55f2af:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T09:28:52,399 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-14T09:28:52,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:28:52,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:28:52,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:28:52,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:28:52,437 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:28:52,439 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T09:28:52,445 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:28:52,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:28:52,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:28:52,510 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store 2024-11-14T09:28:52,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:28:52,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:28:52,539 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-14T09:28:52,542 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:28:52,543 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:28:52,543 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:28:52,543 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:28:52,544 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:28:52,545 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T09:28:52,545 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:28:52,546 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731576532543Disabling compacts and flushes for region at 1731576532543Disabling writes for close at 1731576532545 (+2 ms)Writing region close event to WAL at 1731576532545Closed at 1731576532545 2024-11-14T09:28:52,548 WARN [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/.initializing 2024-11-14T09:28:52,548 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/WALs/83f56b55f2af,41035,1731576531378 2024-11-14T09:28:52,570 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C41035%2C1731576531378, suffix=, logDir=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/WALs/83f56b55f2af,41035,1731576531378, archiveDir=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/oldWALs, maxLogs=10 2024-11-14T09:28:52,579 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C41035%2C1731576531378.1731576532575 2024-11-14T09:28:52,598 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/WALs/83f56b55f2af,41035,1731576531378/83f56b55f2af%2C41035%2C1731576531378.1731576532575 2024-11-14T09:28:52,614 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44137:44137),(127.0.0.1/127.0.0.1:41647:41647)] 2024-11-14T09:28:52,617 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:28:52,617 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:28:52,621 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:28:52,622 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:28:52,659 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:28:52,687 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T09:28:52,692 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:28:52,695 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:28:52,696 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:28:52,699 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T09:28:52,699 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:28:52,700 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:28:52,701 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:28:52,703 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T09:28:52,703 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:28:52,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:28:52,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:28:52,707 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T09:28:52,707 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:28:52,708 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:28:52,708 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:28:52,711 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:28:52,713 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:28:52,717 DEBUG [master/83f56b55f2af:0:becomeActiveMaster 
{}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:28:52,718 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:28:52,722 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T09:28:52,725 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:28:52,729 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:28:52,731 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705270, jitterRate=-0.10320299863815308}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T09:28:52,737 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731576532634Initializing all the Stores at 1731576532636 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576532637 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576532638 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576532638Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576532638Cleaning up temporary data from old regions at 1731576532718 (+80 ms)Region opened successfully at 1731576532737 (+19 ms) 2024-11-14T09:28:52,738 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T09:28:52,773 DEBUG 
[master/83f56b55f2af:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5441034b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83f56b55f2af/172.17.0.2:0 2024-11-14T09:28:52,804 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T09:28:52,815 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T09:28:52,816 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T09:28:52,819 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T09:28:52,820 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-14T09:28:52,825 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-14T09:28:52,825 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T09:28:52,850 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T09:28:52,859 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T09:28:52,861 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T09:28:52,864 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T09:28:52,866 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T09:28:52,869 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T09:28:52,872 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T09:28:52,876 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T09:28:52,878 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T09:28:52,879 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T09:28:52,881 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T09:28:52,898 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T09:28:52,899 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T09:28:52,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:28:52,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:28:52,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:28:52,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:28:52,908 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=83f56b55f2af,41035,1731576531378, sessionid=0x10115cef0ca0000, setting cluster-up flag (Was=false) 2024-11-14T09:28:52,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:28:52,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:28:52,929 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T09:28:52,932 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=83f56b55f2af,41035,1731576531378 2024-11-14T09:28:52,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:28:52,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:28:52,944 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T09:28:52,946 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=83f56b55f2af,41035,1731576531378 2024-11-14T09:28:52,952 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T09:28:52,994 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer(746): ClusterId : e38f99b2-9c39-4866-af2f-9a48e3ec1daf 2024-11-14T09:28:52,997 DEBUG [RS:0;83f56b55f2af:43205 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T09:28:53,001 DEBUG [RS:0;83f56b55f2af:43205 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T09:28:53,002 DEBUG [RS:0;83f56b55f2af:43205 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T09:28:53,005 DEBUG [RS:0;83f56b55f2af:43205 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T09:28:53,006 DEBUG [RS:0;83f56b55f2af:43205 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ceba585, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83f56b55f2af/172.17.0.2:0 2024-11-14T09:28:53,021 DEBUG [RS:0;83f56b55f2af:43205 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;83f56b55f2af:43205 2024-11-14T09:28:53,024 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T09:28:53,024 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T09:28:53,024 DEBUG [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-14T09:28:53,027 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer(2659): reportForDuty to master=83f56b55f2af,41035,1731576531378 with port=43205, startcode=1731576532097 2024-11-14T09:28:53,030 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T09:28:53,038 DEBUG [RS:0;83f56b55f2af:43205 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T09:28:53,040 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T09:28:53,047 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T09:28:53,054 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 83f56b55f2af,41035,1731576531378 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T09:28:53,061 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:28:53,061 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:28:53,061 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:28:53,061 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:28:53,061 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/83f56b55f2af:0, corePoolSize=10, maxPoolSize=10 2024-11-14T09:28:53,061 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:28:53,062 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/83f56b55f2af:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:28:53,062 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:28:53,064 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] 
procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731576563064 2024-11-14T09:28:53,066 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T09:28:53,067 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:28:53,067 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T09:28:53,067 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T09:28:53,072 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T09:28:53,072 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T09:28:53,073 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T09:28:53,073 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T09:28:53,073 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:28:53,074 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T09:28:53,074 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-14T09:28:53,080 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T09:28:53,081 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T09:28:53,082 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T09:28:53,084 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T09:28:53,085 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T09:28:53,090 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576533086,5,FailOnTimeoutGroup] 2024-11-14T09:28:53,093 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576533092,5,FailOnTimeoutGroup] 2024-11-14T09:28:53,093 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:28:53,094 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T09:28:53,095 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T09:28:53,095 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
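Two tunables are reported at their defaults in the entries above: hbase.region.store.parallel.put.limit (StoreHotnessProtector, logged as disabled earlier) and hbase.regions.recovery.store.file.ref.count (reopening regions with very high storeFileRefCount, logged as disabled here). A minimal sketch of setting them programmatically follows; the numeric values are arbitrary placeholders, not recommendations taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Logged earlier as disabled: a value > 0 enables StoreHotnessProtector.
    conf.setInt("hbase.region.store.parallel.put.limit", 10);
    // Logged above as disabled: a threshold > 0 enables reopening regions
    // whose store files hold very high reference counts.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
    return conf;
  }
}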
2024-11-14T09:28:53,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:28:53,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:28:53,101 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T09:28:53,102 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a 2024-11-14T09:28:53,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:28:53,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:28:53,118 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:28:53,120 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:28:53,123 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:28:53,123 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60731, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T09:28:53,123 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:28:53,124 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:28:53,125 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:28:53,128 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:28:53,128 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:28:53,129 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:28:53,129 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:28:53,131 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41035 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 83f56b55f2af,43205,1731576532097 2024-11-14T09:28:53,132 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:28:53,132 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:28:53,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:28:53,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:28:53,134 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41035 {}] master.ServerManager(517): Registering regionserver=83f56b55f2af,43205,1731576532097 2024-11-14T09:28:53,137 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:28:53,137 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:28:53,138 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:28:53,138 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:28:53,140 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740 2024-11-14T09:28:53,141 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740 2024-11-14T09:28:53,144 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:28:53,144 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:28:53,145 DEBUG [PEWorker-1 {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T09:28:53,148 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:28:53,151 DEBUG [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a 2024-11-14T09:28:53,152 DEBUG [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33501 2024-11-14T09:28:53,152 DEBUG [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T09:28:53,152 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:28:53,153 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=882067, jitterRate=0.12160734832286835}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:28:53,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:28:53,157 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731576533118Initializing all the Stores at 1731576533120 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576533120Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576533120Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576533120Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576533120Cleaning up temporary data from old regions at 1731576533144 (+24 ms)Region opened successfully at 1731576533157 (+13 ms) 2024-11-14T09:28:53,157 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:28:53,157 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:28:53,157 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:28:53,158 DEBUG [RS:0;83f56b55f2af:43205 {}] zookeeper.ZKUtil(111): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/83f56b55f2af,43205,1731576532097 2024-11-14T09:28:53,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:28:53,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:28:53,158 WARN [RS:0;83f56b55f2af:43205 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:28:53,158 INFO [RS:0;83f56b55f2af:43205 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:28:53,158 DEBUG [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097 2024-11-14T09:28:53,159 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:28:53,159 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731576533157Disabling compacts and flushes for region at 1731576533157Disabling writes for close at 1731576533158 (+1 ms)Writing region close event to WAL at 1731576533159 (+1 ms)Closed at 1731576533159 2024-11-14T09:28:53,161 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [83f56b55f2af,43205,1731576532097] 2024-11-14T09:28:53,163 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:28:53,163 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T09:28:53,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T09:28:53,179 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:28:53,181 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T09:28:53,188 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T09:28:53,200 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T09:28:53,206 INFO [RS:0;83f56b55f2af:43205 
{}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:28:53,206 INFO [RS:0;83f56b55f2af:43205 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:28:53,207 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T09:28:53,214 INFO [RS:0;83f56b55f2af:43205 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T09:28:53,216 INFO [RS:0;83f56b55f2af:43205 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T09:28:53,216 DEBUG [RS:0;83f56b55f2af:43205 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:28:53,216 DEBUG [RS:0;83f56b55f2af:43205 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:28:53,216 DEBUG [RS:0;83f56b55f2af:43205 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:28:53,216 DEBUG [RS:0;83f56b55f2af:43205 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:28:53,217 DEBUG [RS:0;83f56b55f2af:43205 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:28:53,217 DEBUG [RS:0;83f56b55f2af:43205 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/83f56b55f2af:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:28:53,217 DEBUG [RS:0;83f56b55f2af:43205 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:28:53,217 DEBUG [RS:0;83f56b55f2af:43205 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:28:53,217 DEBUG [RS:0;83f56b55f2af:43205 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:28:53,217 DEBUG [RS:0;83f56b55f2af:43205 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:28:53,217 DEBUG [RS:0;83f56b55f2af:43205 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:28:53,217 DEBUG [RS:0;83f56b55f2af:43205 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:28:53,218 DEBUG [RS:0;83f56b55f2af:43205 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/83f56b55f2af:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:28:53,218 DEBUG [RS:0;83f56b55f2af:43205 {}] executor.ExecutorService(95): Starting executor 
service name=RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:28:53,219 INFO [RS:0;83f56b55f2af:43205 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:28:53,219 INFO [RS:0;83f56b55f2af:43205 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:28:53,219 INFO [RS:0;83f56b55f2af:43205 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:28:53,219 INFO [RS:0;83f56b55f2af:43205 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T09:28:53,219 INFO [RS:0;83f56b55f2af:43205 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T09:28:53,219 INFO [RS:0;83f56b55f2af:43205 {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,43205,1731576532097-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:28:53,238 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T09:28:53,240 INFO [RS:0;83f56b55f2af:43205 {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,43205,1731576532097-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:28:53,240 INFO [RS:0;83f56b55f2af:43205 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:28:53,240 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.Replication(171): 83f56b55f2af,43205,1731576532097 started 2024-11-14T09:28:53,259 INFO [RS:0;83f56b55f2af:43205 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
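Several of the "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." entries above come from HBase's ChoreService. The sketch below shows the general pattern, assuming the ScheduledChore/ChoreService classes behave as in current HBase; the chore name, period, and pool prefix are invented for illustration and are not taken from this test.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  // Hypothetical chore, analogous in shape to the CompactionChecker and
  // MemstoreFlusherChore entries enabled above: chore() runs once per period.
  static class HeartbeatChore extends ScheduledChore {
    HeartbeatChore(Stoppable stopper) {
      super("HeartbeatChore", stopper, 1000); // name, stopper, period in ms
    }
    @Override
    protected void chore() {
      System.out.println("chore tick");
    }
  }

  public static void main(String[] args) {
    ChoreService service = new ChoreService("sketch");
    service.scheduleChore(new HeartbeatChore(new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    }));
  }
}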
2024-11-14T09:28:53,259 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer(1482): Serving as 83f56b55f2af,43205,1731576532097, RpcServer on 83f56b55f2af/172.17.0.2:43205, sessionid=0x10115cef0ca0001 2024-11-14T09:28:53,260 DEBUG [RS:0;83f56b55f2af:43205 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T09:28:53,260 DEBUG [RS:0;83f56b55f2af:43205 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 83f56b55f2af,43205,1731576532097 2024-11-14T09:28:53,260 DEBUG [RS:0;83f56b55f2af:43205 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83f56b55f2af,43205,1731576532097' 2024-11-14T09:28:53,260 DEBUG [RS:0;83f56b55f2af:43205 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T09:28:53,261 DEBUG [RS:0;83f56b55f2af:43205 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T09:28:53,262 DEBUG [RS:0;83f56b55f2af:43205 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T09:28:53,262 DEBUG [RS:0;83f56b55f2af:43205 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T09:28:53,262 DEBUG [RS:0;83f56b55f2af:43205 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 83f56b55f2af,43205,1731576532097 2024-11-14T09:28:53,262 DEBUG [RS:0;83f56b55f2af:43205 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83f56b55f2af,43205,1731576532097' 2024-11-14T09:28:53,262 DEBUG [RS:0;83f56b55f2af:43205 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T09:28:53,263 DEBUG [RS:0;83f56b55f2af:43205 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T09:28:53,264 DEBUG [RS:0;83f56b55f2af:43205 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T09:28:53,264 INFO [RS:0;83f56b55f2af:43205 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T09:28:53,264 INFO [RS:0;83f56b55f2af:43205 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T09:28:53,332 WARN [83f56b55f2af:41035 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
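The two "Quota support disabled" entries above indicate that RPC and space quotas are off for this run. The sketch below assumes "hbase.quota.enabled" is the switch behind those messages; that key is not named anywhere in this log, so treat it as an illustration rather than something confirmed by the test output.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class QuotaSketch {
  public static Configuration withQuotas() {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: this is the switch behind the "Quota support disabled"
    // messages above; it does not appear in this log.
    conf.setBoolean("hbase.quota.enabled", true);
    return conf;
  }
}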
2024-11-14T09:28:53,372 INFO [RS:0;83f56b55f2af:43205 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C43205%2C1731576532097, suffix=, logDir=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097, archiveDir=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/oldWALs, maxLogs=32 2024-11-14T09:28:53,375 INFO [RS:0;83f56b55f2af:43205 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C43205%2C1731576532097.1731576533375 2024-11-14T09:28:53,383 INFO [RS:0;83f56b55f2af:43205 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576533375 2024-11-14T09:28:53,384 DEBUG [RS:0;83f56b55f2af:43205 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41647:41647),(127.0.0.1/127.0.0.1:44137:44137)] 2024-11-14T09:28:53,585 DEBUG [83f56b55f2af:41035 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T09:28:53,597 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=83f56b55f2af,43205,1731576532097 2024-11-14T09:28:53,603 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 83f56b55f2af,43205,1731576532097, state=OPENING 2024-11-14T09:28:53,608 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T09:28:53,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:28:53,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:28:53,611 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:28:53,611 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:28:53,613 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:28:53,614 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=83f56b55f2af,43205,1731576532097}] 2024-11-14T09:28:53,792 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T09:28:53,795 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53635, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T09:28:53,806 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T09:28:53,807 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:28:53,810 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C43205%2C1731576532097.meta, suffix=.meta, logDir=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097, archiveDir=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/oldWALs, maxLogs=32 2024-11-14T09:28:53,813 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C43205%2C1731576532097.meta.1731576533813.meta 2024-11-14T09:28:53,822 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.meta.1731576533813.meta 2024-11-14T09:28:53,823 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41647:41647),(127.0.0.1/127.0.0.1:44137:44137)] 2024-11-14T09:28:53,824 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:28:53,826 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T09:28:53,829 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T09:28:53,834 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-14T09:28:53,839 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T09:28:53,840 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:28:53,840 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T09:28:53,841 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T09:28:53,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:28:53,845 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:28:53,846 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:28:53,846 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:28:53,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:28:53,848 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:28:53,848 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:28:53,849 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:28:53,849 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:28:53,850 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:28:53,851 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:28:53,851 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:28:53,851 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:28:53,853 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:28:53,853 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:28:53,854 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-14T09:28:53,854 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:28:53,855 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740 2024-11-14T09:28:53,857 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740 2024-11-14T09:28:53,860 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:28:53,860 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:28:53,861 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T09:28:53,863 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:28:53,864 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=781335, jitterRate=-0.006481289863586426}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:28:53,865 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T09:28:53,866 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731576533841Writing region info on filesystem at 1731576533841Initializing all the Stores at 1731576533843 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576533843Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576533844 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576533844Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576533844Cleaning up temporary data from old regions at 1731576533860 (+16 ms)Running coprocessor post-open hooks at 1731576533865 (+5 ms)Region opened successfully at 1731576533865 2024-11-14T09:28:53,873 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731576533783 2024-11-14T09:28:53,885 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T09:28:53,886 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T09:28:53,887 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=83f56b55f2af,43205,1731576532097 2024-11-14T09:28:53,889 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 83f56b55f2af,43205,1731576532097, state=OPEN 2024-11-14T09:28:53,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:28:53,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:28:53,895 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:28:53,895 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=83f56b55f2af,43205,1731576532097 2024-11-14T09:28:53,895 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:28:53,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T09:28:53,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=83f56b55f2af,43205,1731576532097 in 281 msec 2024-11-14T09:28:53,907 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T09:28:53,907 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 733 msec 2024-11-14T09:28:53,909 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:28:53,909 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T09:28:53,932 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:28:53,934 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=83f56b55f2af,43205,1731576532097, seqNum=-1] 2024-11-14T09:28:53,955 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:28:53,957 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52179, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:28:53,978 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 995 msec 2024-11-14T09:28:53,978 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731576533978, completionTime=-1 2024-11-14T09:28:53,981 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T09:28:53,982 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T09:28:54,009 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T09:28:54,010 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731576594010 2024-11-14T09:28:54,010 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731576654010 2024-11-14T09:28:54,010 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 28 msec 2024-11-14T09:28:54,013 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,41035,1731576531378-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:28:54,013 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,41035,1731576531378-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:28:54,013 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,41035,1731576531378-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:28:54,015 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-83f56b55f2af:41035, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T09:28:54,015 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T09:28:54,015 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T09:28:54,022 DEBUG [master/83f56b55f2af:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T09:28:54,081 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.831sec 2024-11-14T09:28:54,082 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T09:28:54,083 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T09:28:54,084 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T09:28:54,085 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T09:28:54,085 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T09:28:54,086 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,41035,1731576531378-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:28:54,086 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,41035,1731576531378-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T09:28:54,096 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T09:28:54,097 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T09:28:54,097 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,41035,1731576531378-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
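
[Annotation] With the master reporting initialization complete, the entries that follow show the test opening a client connection: cluster id fetch, meta region location fetch, then a master stub. A sketch of the equivalent public-API bootstrap; the client port is the quorum port seen in the log, and note that in this 3.0 snapshot the connection registry actually resolves the cluster id via the master RPC endpoint, so the ZooKeeper settings below are just the conventional client configuration:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class ClientBootstrap {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      conf.set("hbase.zookeeper.quorum", "127.0.0.1");
      conf.set("hbase.zookeeper.property.clientPort", "56289");  // quorum port from the log
      try (Connection connection = ConnectionFactory.createConnection(conf);
           Admin admin = connection.getAdmin()) {
        // Cluster id and master address are resolved lazily by the connection registry on first use,
        // which is what the ClusterIdFetcher / "fetched master address" lines below correspond to.
        System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
      }
    }
  }
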
2024-11-14T09:28:54,104 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18b51bbe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:28:54,106 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-14T09:28:54,107 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-14T09:28:54,110 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 83f56b55f2af,41035,-1 for getting cluster id 2024-11-14T09:28:54,112 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T09:28:54,123 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e38f99b2-9c39-4866-af2f-9a48e3ec1daf' 2024-11-14T09:28:54,126 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T09:28:54,126 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e38f99b2-9c39-4866-af2f-9a48e3ec1daf" 2024-11-14T09:28:54,127 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77436fff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:28:54,127 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [83f56b55f2af,41035,-1] 2024-11-14T09:28:54,131 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T09:28:54,133 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:28:54,134 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60432, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T09:28:54,138 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@543c1b08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:28:54,138 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:28:54,146 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=83f56b55f2af,43205,1731576532097, seqNum=-1] 2024-11-14T09:28:54,147 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:28:54,150 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59474, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:28:54,176 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=83f56b55f2af,41035,1731576531378 2024-11-14T09:28:54,177 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:28:54,186 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T09:28:54,191 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T09:28:54,196 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 83f56b55f2af,41035,1731576531378 2024-11-14T09:28:54,200 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2e192e5e 2024-11-14T09:28:54,201 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T09:28:54,204 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60442, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T09:28:54,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41035 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T09:28:54,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41035 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
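
[Annotation] The two TableDescriptorChecker warnings above flag MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192 as unusually small; the test sets them low on purpose so flushes and splits trigger quickly, and they only warn here, presumably because the table sanity checks are relaxed in the test configuration. Roughly how such a descriptor is built with the public client API, using the values and the 'info' family from the create request below:

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

  public class SmallRegionTable {
    // One 'info' family, tiny per-table max file size and memstore flush size (values from the warnings above).
    static TableDescriptor descriptor() {
      return TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .setMaxFileSize(786432L)        // per-table override of "hbase.hregion.max.filesize"
          .setMemStoreFlushSize(8192L)    // per-table override of "hbase.hregion.memstore.flush.size"
          .build();
    }

    static void create(Admin admin) throws java.io.IOException {
      admin.createTable(descriptor());    // drives the CreateTableProcedure seen below (pid=4)
    }
  }
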
2024-11-14T09:28:54,210 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41035 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:28:54,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41035 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-14T09:28:54,220 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T09:28:54,222 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41035 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-14T09:28:54,223 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:28:54,225 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T09:28:54,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41035 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T09:28:54,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741835_1011 (size=389) 2024-11-14T09:28:54,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741835_1011 (size=389) 2024-11-14T09:28:54,260 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6e1713d974bb85bbc3af5b51982a72b5, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a 2024-11-14T09:28:54,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741836_1012 (size=72) 2024-11-14T09:28:54,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741836_1012 (size=72) 2024-11-14T09:28:54,276 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:28:54,277 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 6e1713d974bb85bbc3af5b51982a72b5, disabling compactions & flushes 2024-11-14T09:28:54,277 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. 2024-11-14T09:28:54,277 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. 2024-11-14T09:28:54,277 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. after waiting 0 ms 2024-11-14T09:28:54,277 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. 2024-11-14T09:28:54,277 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. 2024-11-14T09:28:54,277 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6e1713d974bb85bbc3af5b51982a72b5: Waiting for close lock at 1731576534277Disabling compacts and flushes for region at 1731576534277Disabling writes for close at 1731576534277Writing region close event to WAL at 1731576534277Closed at 1731576534277 2024-11-14T09:28:54,280 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T09:28:54,286 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731576534280"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731576534280"}]},"ts":"1731576534280"} 2024-11-14T09:28:54,294 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-14T09:28:54,299 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T09:28:54,302 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731576534299"}]},"ts":"1731576534299"} 2024-11-14T09:28:54,307 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-14T09:28:54,308 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6e1713d974bb85bbc3af5b51982a72b5, ASSIGN}] 2024-11-14T09:28:54,311 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6e1713d974bb85bbc3af5b51982a72b5, ASSIGN 2024-11-14T09:28:54,314 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6e1713d974bb85bbc3af5b51982a72b5, ASSIGN; state=OFFLINE, location=83f56b55f2af,43205,1731576532097; forceNewPlan=false, retain=false 2024-11-14T09:28:54,465 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6e1713d974bb85bbc3af5b51982a72b5, regionState=OPENING, regionLocation=83f56b55f2af,43205,1731576532097 2024-11-14T09:28:54,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6e1713d974bb85bbc3af5b51982a72b5, ASSIGN because future has completed 2024-11-14T09:28:54,471 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6e1713d974bb85bbc3af5b51982a72b5, server=83f56b55f2af,43205,1731576532097}] 2024-11-14T09:28:54,632 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. 
2024-11-14T09:28:54,633 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 6e1713d974bb85bbc3af5b51982a72b5, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:28:54,634 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:28:54,634 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:28:54,634 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:28:54,634 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:28:54,637 INFO [StoreOpener-6e1713d974bb85bbc3af5b51982a72b5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:28:54,640 INFO [StoreOpener-6e1713d974bb85bbc3af5b51982a72b5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e1713d974bb85bbc3af5b51982a72b5 columnFamilyName info 2024-11-14T09:28:54,640 DEBUG [StoreOpener-6e1713d974bb85bbc3af5b51982a72b5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:28:54,641 INFO [StoreOpener-6e1713d974bb85bbc3af5b51982a72b5-1 {}] regionserver.HStore(327): Store=6e1713d974bb85bbc3af5b51982a72b5/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:28:54,641 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:28:54,643 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:28:54,643 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:28:54,644 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:28:54,644 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:28:54,647 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:28:54,651 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:28:54,652 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 6e1713d974bb85bbc3af5b51982a72b5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740650, jitterRate=-0.05821534991264343}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T09:28:54,652 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:28:54,654 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 6e1713d974bb85bbc3af5b51982a72b5: Running coprocessor pre-open hook at 1731576534634Writing region info on filesystem at 1731576534634Initializing all the Stores at 1731576534636 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576534636Cleaning up temporary data from old regions at 1731576534644 (+8 ms)Running coprocessor post-open hooks at 1731576534652 (+8 ms)Region opened successfully at 1731576534654 (+2 ms) 2024-11-14T09:28:54,657 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5., pid=6, masterSystemTime=1731576534625 2024-11-14T09:28:54,661 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. 2024-11-14T09:28:54,661 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. 2024-11-14T09:28:54,662 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6e1713d974bb85bbc3af5b51982a72b5, regionState=OPEN, openSeqNum=2, regionLocation=83f56b55f2af,43205,1731576532097 2024-11-14T09:28:54,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6e1713d974bb85bbc3af5b51982a72b5, server=83f56b55f2af,43205,1731576532097 because future has completed 2024-11-14T09:28:54,674 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T09:28:54,674 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 6e1713d974bb85bbc3af5b51982a72b5, server=83f56b55f2af,43205,1731576532097 in 198 msec 2024-11-14T09:28:54,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T09:28:54,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=6e1713d974bb85bbc3af5b51982a72b5, ASSIGN in 366 msec 2024-11-14T09:28:54,681 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T09:28:54,682 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731576534681"}]},"ts":"1731576534681"} 2024-11-14T09:28:54,685 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-14T09:28:54,687 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T09:28:54,690 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 474 msec 2024-11-14T09:28:59,343 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-14T09:28:59,399 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T09:28:59,401 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-14T09:29:01,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T09:29:01,795 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T09:29:01,797 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-14T09:29:01,797 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-14T09:29:01,799 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:29:01,799 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T09:29:01,799 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T09:29:01,799 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-14T09:29:04,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41035 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T09:29:04,245 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-14T09:29:04,249 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-14T09:29:04,256 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-14T09:29:04,257 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. 
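
[Annotation] The meta scan above finds exactly one region for the new table; just below, the async client resolves the location for row 'row0001' the same way. The explicit public-API equivalent uses RegionLocator; an already-open Connection is assumed:

  import java.util.List;
  import org.apache.hadoop.hbase.HRegionLocation;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.RegionLocator;
  import org.apache.hadoop.hbase.util.Bytes;

  public class LocateRegion {
    static void locate(Connection connection) throws java.io.IOException {
      TableName table = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
      try (RegionLocator locator = connection.getRegionLocator(table)) {
        List<HRegionLocation> all = locator.getAllRegionLocations();      // "Found 1 regions for table ..."
        HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row0001"));
        System.out.println(all.size() + " region(s); row0001 -> " + loc.getServerName());
      }
    }
  }
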
2024-11-14T09:29:04,258 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C43205%2C1731576532097.1731576544258 2024-11-14T09:29:04,267 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:04,267 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:04,267 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:04,268 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:04,268 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:04,268 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576533375 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576544258 2024-11-14T09:29:04,270 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41647:41647),(127.0.0.1/127.0.0.1:44137:44137)] 2024-11-14T09:29:04,270 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576533375 is not closed yet, will try archiving it next time 2024-11-14T09:29:04,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741833_1009 (size=451) 2024-11-14T09:29:04,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741833_1009 (size=451) 2024-11-14T09:29:04,273 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576533375 to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/oldWALs/83f56b55f2af%2C43205%2C1731576532097.1731576533375 2024-11-14T09:29:04,279 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5., hostname=83f56b55f2af,43205,1731576532097, seqNum=2] 2024-11-14T09:29:16,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43205 {}] regionserver.HRegion(8855): Flush requested on 6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:29:16,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6e1713d974bb85bbc3af5b51982a72b5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:29:16,408 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/06d9882527b14a4981d08a27972e12fc is 1080, key is row0001/info:/1731576544282/Put/seqid=0 2024-11-14T09:29:16,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741838_1014 (size=12509) 2024-11-14T09:29:16,439 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741838_1014 (size=12509) 2024-11-14T09:29:16,440 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/06d9882527b14a4981d08a27972e12fc 2024-11-14T09:29:16,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/06d9882527b14a4981d08a27972e12fc as hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/06d9882527b14a4981d08a27972e12fc 2024-11-14T09:29:16,528 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/06d9882527b14a4981d08a27972e12fc, entries=7, sequenceid=11, filesize=12.2 K 2024-11-14T09:29:16,543 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6e1713d974bb85bbc3af5b51982a72b5 in 219ms, sequenceid=11, compaction requested=false 2024-11-14T09:29:16,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6e1713d974bb85bbc3af5b51982a72b5: 2024-11-14T09:29:20,425 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
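Note: the flush above follows a write-then-commit pattern — the memstore snapshot is first written under the store's .tmp directory, then moved into the live info directory, and the result is recorded in the flush status journal. Below is a minimal, self-contained Java sketch of that pattern; it is an illustration under stated assumptions (SimpleStoreFlusher and its method names are hypothetical), not HBase's DefaultStoreFlusher or HRegionFileSystem code.

import java.io.IOException;
import java.nio.file.*;
import java.util.List;

/**
 * Minimal sketch (not HBase's implementation) of the flush pattern the log
 * shows: write the snapshot to a temporary file first, then move it into the
 * store directory so readers never observe a partially written file.
 * All names here are hypothetical.
 */
public class SimpleStoreFlusher {

    /** Writes entries to storeDir/.tmp/fileName, then commits it to storeDir/fileName. */
    static Path flush(Path storeDir, String fileName, List<String> entries) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Files.createDirectories(tmpDir);

        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, entries);                        // flush memstore snapshot to .tmp

        Path committed = storeDir.resolve(fileName);
        // Commit step: move the .tmp file into the live store directory.
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
        return committed;
    }

    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("info-store");
        long start = System.currentTimeMillis();
        Path f = flush(storeDir, "example-flush-file",
                List.of("row0001", "row0002", "row0003"));
        System.out.printf("Finished flush of %d entries to %s in %dms%n",
                3, f, System.currentTimeMillis() - start);
    }
}

The temporary-file step is the important part: readers only ever see fully written store files, and the final commit is a single rename.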
2024-11-14T09:29:24,336 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C43205%2C1731576532097.1731576564336 2024-11-14T09:29:24,546 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:24,546 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:24,547 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:24,547 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:24,547 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:24,547 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:24,547 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576544258 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576564336 2024-11-14T09:29:24,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741837_1013 (size=12399) 2024-11-14T09:29:24,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741837_1013 (size=12399) 2024-11-14T09:29:24,556 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41647:41647),(127.0.0.1/127.0.0.1:44137:44137)] 2024-11-14T09:29:24,760 INFO [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:26,964 INFO [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:29,168 INFO [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:31,372 INFO [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:31,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43205 {}] 
regionserver.HRegion(8855): Flush requested on 6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:29:31,373 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6e1713d974bb85bbc3af5b51982a72b5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:29:31,575 INFO [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:31,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/1bcbbb9263114a7b8da44a50bacc79ca is 1080, key is row0008/info:/1731576558324/Put/seqid=0 2024-11-14T09:29:31,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741840_1016 (size=12509) 2024-11-14T09:29:31,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741840_1016 (size=12509) 2024-11-14T09:29:31,591 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/1bcbbb9263114a7b8da44a50bacc79ca 2024-11-14T09:29:31,603 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/1bcbbb9263114a7b8da44a50bacc79ca as hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/1bcbbb9263114a7b8da44a50bacc79ca 2024-11-14T09:29:31,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/1bcbbb9263114a7b8da44a50bacc79ca, entries=7, sequenceid=21, filesize=12.2 K 2024-11-14T09:29:31,815 INFO [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:31,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6e1713d974bb85bbc3af5b51982a72b5 in 442ms, sequenceid=21, compaction requested=false 2024-11-14T09:29:31,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6e1713d974bb85bbc3af5b51982a72b5: 2024-11-14T09:29:31,816 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-14T09:29:31,816 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:29:31,817 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/06d9882527b14a4981d08a27972e12fc because midkey is the same as first or last row 2024-11-14T09:29:33,577 INFO [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:34,100 INFO [master/83f56b55f2af:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T09:29:34,100 INFO [master/83f56b55f2af:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-14T09:29:35,781 INFO [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:35,783 WARN [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:35,784 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 83f56b55f2af%2C43205%2C1731576532097:(num 1731576564336) roll requested 2024-11-14T09:29:35,785 INFO [regionserver/83f56b55f2af:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C43205%2C1731576532097.1731576575785 2024-11-14T09:29:35,993 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:35,994 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:35,994 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:35,994 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:35,994 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:35,994 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:35,994 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576564336 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576575785 2024-11-14T09:29:35,995 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41647:41647),(127.0.0.1/127.0.0.1:44137:44137)] 2024-11-14T09:29:35,996 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576564336 is not closed yet, will try archiving it next time 2024-11-14T09:29:35,996 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576544258 to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/oldWALs/83f56b55f2af%2C43205%2C1731576532097.1731576544258 2024-11-14T09:29:35,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741839_1015 (size=7739) 2024-11-14T09:29:35,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741839_1015 (size=7739) 2024-11-14T09:29:37,985 INFO [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:39,634 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6e1713d974bb85bbc3af5b51982a72b5, had cached 0 bytes from a total of 25018 2024-11-14T09:29:40,189 INFO [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:42,393 INFO [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:44,597 INFO [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:46,599 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T09:29:46,600 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C43205%2C1731576532097.1731576586600 
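Note: the roll requests in this stretch of the log are driven by two independent slow-sync triggers — a count of syncs slower than a per-sync threshold ("count=8, threshold=5" above) and, a little further down, a single sync slower than an absolute bound ("time=5005 ms, threshold=5000 ms"). A minimal sketch of that bookkeeping follows; the class name, the exact thresholds, and the reset behaviour are assumptions lifted from the numbers in the log, not the FSHLog implementation (which, for instance, evaluates the count inside a time window).

/**
 * Sketch of the slow-sync accounting visible in the log: many moderately slow
 * syncs, or one very slow sync, both end in a log-roll request. Illustrative
 * only; names and thresholds are assumptions taken from the log output.
 */
public class SlowSyncTracker {
    static final long SLOW_SYNC_MS = 100;        // a sync above this counts as "slow" (assumed)
    static final long ROLL_ON_SYNC_MS = 5000;    // a single sync above this forces a roll
    static final int SLOW_SYNC_ROLL_COUNT = 5;   // this many slow syncs force a roll

    private int slowSyncCount;

    /** Returns true when a log roll should be requested. */
    boolean recordSync(long syncCostMs) {
        if (syncCostMs >= ROLL_ON_SYNC_MS) {
            System.out.printf("Requesting log roll: time=%d ms, threshold=%d ms%n",
                    syncCostMs, ROLL_ON_SYNC_MS);
            slowSyncCount = 0;
            return true;
        }
        if (syncCostMs >= SLOW_SYNC_MS) {
            slowSyncCount++;
            System.out.printf("Slow sync cost: %d ms (count=%d)%n", syncCostMs, slowSyncCount);
        }
        if (slowSyncCount > SLOW_SYNC_ROLL_COUNT) {
            System.out.printf("Requesting log roll: count=%d, threshold=%d%n",
                    slowSyncCount, SLOW_SYNC_ROLL_COUNT);
            slowSyncCount = 0;
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        SlowSyncTracker tracker = new SlowSyncTracker();
        // Sync costs resembling the ones reported above: several ~200 ms syncs, then one 5 s sync.
        long[] observed = {207, 201, 201, 201, 201, 201, 201, 201, 5005};
        for (long cost : observed) {
            tracker.recordSync(cost);
        }
    }
}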
2024-11-14T09:29:50,426 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T09:29:51,609 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:51,611 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:29:51,611 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 83f56b55f2af%2C43205%2C1731576532097:(num 1731576586600) roll requested 2024-11-14T09:29:51,612 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:51,612 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:51,612 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:51,612 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:51,612 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:29:51,612 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576575785 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576586600 2024-11-14T09:29:51,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741841_1017 (size=4753) 2024-11-14T09:29:51,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741841_1017 (size=4753) 2024-11-14T09:29:51,620 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44137:44137),(127.0.0.1/127.0.0.1:41647:41647)] 2024-11-14T09:29:51,621 INFO [regionserver/83f56b55f2af:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C43205%2C1731576532097.1731576591621 2024-11-14T09:29:56,624 INFO [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK], DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK]] 2024-11-14T09:29:56,624 WARN [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK], DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK]] 2024-11-14T09:29:56,624 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43205 {}] regionserver.HRegion(8855): Flush requested on 6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:29:56,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6e1713d974bb85bbc3af5b51982a72b5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:29:56,630 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK], DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK]] 2024-11-14T09:29:56,630 WARN [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK], DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK]] 2024-11-14T09:29:58,625 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T09:30:01,627 INFO [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK], DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK]] 2024-11-14T09:30:01,627 WARN [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK], DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK]] 2024-11-14T09:30:01,627 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:01,628 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:01,628 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:01,630 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:01,630 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:01,631 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576586600 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576591621 2024-11-14T09:30:01,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741842_1018 (size=1569) 2024-11-14T09:30:01,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741842_1018 (size=1569) 2024-11-14T09:30:01,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/006c536584a945228a97e6a05ce1083d is 1080, key is row0015/info:/1731576573375/Put/seqid=0 2024-11-14T09:30:01,651 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41647:41647),(127.0.0.1/127.0.0.1:44137:44137)] 2024-11-14T09:30:01,652 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576586600 is not closed yet, will try archiving it next time 2024-11-14T09:30:01,652 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 83f56b55f2af%2C43205%2C1731576532097:(num 1731576591621) roll requested 2024-11-14T09:30:01,652 INFO [regionserver/83f56b55f2af:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C43205%2C1731576532097.1731576601652 2024-11-14T09:30:01,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741844_1020 (size=12509) 2024-11-14T09:30:01,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741844_1020 (size=12509) 2024-11-14T09:30:01,684 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/006c536584a945228a97e6a05ce1083d 2024-11-14T09:30:01,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/006c536584a945228a97e6a05ce1083d as hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/006c536584a945228a97e6a05ce1083d 2024-11-14T09:30:01,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/006c536584a945228a97e6a05ce1083d, entries=7, sequenceid=31, filesize=12.2 K 2024-11-14T09:30:06,669 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:30:06,669 WARN [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:30:06,732 INFO 
[FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:30:06,732 WARN [FSHLog-0-hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a-prefix:83f56b55f2af,43205,1731576532097 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33135,DS-ae795588-c0e7-4adc-aa9e-e383c97c62c4,DISK], DatanodeInfoWithStorage[127.0.0.1:32977,DS-fd3d3121-ce85-47e9-a21d-1c9cbcfa9d90,DISK]] 2024-11-14T09:30:06,732 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6e1713d974bb85bbc3af5b51982a72b5 in 10108ms, sequenceid=31, compaction requested=true 2024-11-14T09:30:06,732 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:06,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6e1713d974bb85bbc3af5b51982a72b5: 2024-11-14T09:30:06,732 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:06,733 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:06,733 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-14T09:30:06,733 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:06,733 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:30:06,733 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:06,733 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/06d9882527b14a4981d08a27972e12fc because midkey is the same as first or last row 2024-11-14T09:30:06,733 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576591621 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576601652 2024-11-14T09:30:06,734 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44137:44137),(127.0.0.1/127.0.0.1:41647:41647)] 2024-11-14T09:30:06,734 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576591621 is not closed yet, will try archiving it next time 2024-11-14T09:30:06,734 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576564336 
to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/oldWALs/83f56b55f2af%2C43205%2C1731576532097.1731576564336 2024-11-14T09:30:06,735 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 83f56b55f2af%2C43205%2C1731576532097:(num 1731576606734) roll requested 2024-11-14T09:30:06,735 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C43205%2C1731576532097.1731576606734 2024-11-14T09:30:06,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6e1713d974bb85bbc3af5b51982a72b5:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:30:06,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741843_1019 (size=438) 2024-11-14T09:30:06,737 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576575785 to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/oldWALs/83f56b55f2af%2C43205%2C1731576532097.1731576575785 2024-11-14T09:30:06,738 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:30:06,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741843_1019 (size=438) 2024-11-14T09:30:06,739 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576586600 to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/oldWALs/83f56b55f2af%2C43205%2C1731576532097.1731576586600 2024-11-14T09:30:06,740 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:30:06,741 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576591621 to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/oldWALs/83f56b55f2af%2C43205%2C1731576532097.1731576591621 2024-11-14T09:30:06,743 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:30:06,745 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.HStore(1541): 6e1713d974bb85bbc3af5b51982a72b5/info is initiating minor compaction (all files) 2024-11-14T09:30:06,745 INFO [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6e1713d974bb85bbc3af5b51982a72b5/info in TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. 
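Note: the compaction selection above ("Selecting compaction from 3 store files ... selected 3 files of size 37527 ... with 1 in ratio") picks a contiguous run of eligible store files that passes a size-ratio test. The sketch below is a simplified stand-in for that idea — scan candidate windows and keep one only if every file is no larger than a ratio times the sum of the other files — and is not HBase's ExploringCompactionPolicy; RATIO and MIN_FILES are assumed values.

import java.util.ArrayList;
import java.util.List;

/**
 * Simplified sketch of ratio-based minor-compaction selection, in the spirit of
 * the exploring-style selection the log reports. Illustrative only.
 */
public class CompactionSelectionSketch {
    static final double RATIO = 1.2;       // assumption for this sketch
    static final int MIN_FILES = 3;        // assumption for this sketch

    /** Returns the chosen window of file sizes, or an empty list if none qualifies. */
    static List<Long> select(List<Long> fileSizes) {
        List<Long> best = new ArrayList<>();
        for (int start = 0; start < fileSizes.size(); start++) {
            for (int end = start + MIN_FILES; end <= fileSizes.size(); end++) {
                List<Long> window = fileSizes.subList(start, end);
                if (inRatio(window) && window.size() > best.size()) {
                    best = new ArrayList<>(window);
                }
            }
        }
        return best;
    }

    /** Every file must be no larger than RATIO times the combined size of the others. */
    static boolean inRatio(List<Long> window) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
            if (size > RATIO * (total - size)) return false;
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes roughly matching the three 12.2 K flush files selected in the log.
        List<Long> chosen = select(List.of(12509L, 12509L, 12509L));
        long total = chosen.stream().mapToLong(Long::longValue).sum();
        System.out.printf("Selected %d files of size %d%n", chosen.size(), total);
    }
}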
2024-11-14T09:30:06,745 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:06,746 INFO [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/06d9882527b14a4981d08a27972e12fc, hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/1bcbbb9263114a7b8da44a50bacc79ca, hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/006c536584a945228a97e6a05ce1083d] into tmpdir=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp, totalSize=36.6 K 2024-11-14T09:30:06,746 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:06,746 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:06,746 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:06,746 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:06,747 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576601652 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576606734 2024-11-14T09:30:06,747 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] compactions.Compactor(225): Compacting 06d9882527b14a4981d08a27972e12fc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731576544282 2024-11-14T09:30:06,748 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1bcbbb9263114a7b8da44a50bacc79ca, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731576558324 2024-11-14T09:30:06,748 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41647:41647),(127.0.0.1/127.0.0.1:44137:44137)] 2024-11-14T09:30:06,748 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576601652 is not closed yet, will try archiving it next time 2024-11-14T09:30:06,748 INFO [regionserver/83f56b55f2af:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C43205%2C1731576532097.1731576606748 2024-11-14T09:30:06,749 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] compactions.Compactor(225): Compacting 006c536584a945228a97e6a05ce1083d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731576573375 2024-11-14T09:30:06,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741845_1021 (size=93) 2024-11-14T09:30:06,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to 
blk_1073741845_1021 (size=93) 2024-11-14T09:30:06,759 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:06,759 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:06,759 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:06,759 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:06,763 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:06,764 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576606734 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576606748 2024-11-14T09:30:06,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741846_1022 (size=1258) 2024-11-14T09:30:06,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741846_1022 (size=1258) 2024-11-14T09:30:06,767 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576601652 is not closed yet, will try archiving it next time 2024-11-14T09:30:06,773 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41647:41647),(127.0.0.1/127.0.0.1:44137:44137)] 2024-11-14T09:30:06,773 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576601652 is not closed yet, will try archiving it next time 2024-11-14T09:30:06,785 INFO [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6e1713d974bb85bbc3af5b51982a72b5#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:30:06,786 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/2ce29a64f38f4cb38e2536e75c0cc696 is 1080, key is row0001/info:/1731576544282/Put/seqid=0 2024-11-14T09:30:06,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741848_1024 (size=27710) 2024-11-14T09:30:06,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741848_1024 (size=27710) 2024-11-14T09:30:07,151 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/WALs/83f56b55f2af,43205,1731576532097/83f56b55f2af%2C43205%2C1731576532097.1731576601652 to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/oldWALs/83f56b55f2af%2C43205%2C1731576532097.1731576601652 2024-11-14T09:30:07,206 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/2ce29a64f38f4cb38e2536e75c0cc696 as hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/2ce29a64f38f4cb38e2536e75c0cc696 2024-11-14T09:30:07,227 INFO [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6e1713d974bb85bbc3af5b51982a72b5/info of 6e1713d974bb85bbc3af5b51982a72b5 into 2ce29a64f38f4cb38e2536e75c0cc696(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
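Note: the throttle line above reports compaction write throughput against a 50.00 MB/second limit. The small sketch below illustrates the general idea of byte-budget throttling — sleep whenever the writer runs ahead of its per-second budget; the control strategy and names are simplifications, not the PressureAwareThroughputController algorithm.

/**
 * Minimal sketch of write throttling: track bytes written against a
 * bytes-per-second budget and sleep when the writer gets ahead of it.
 * Illustrative only; not HBase's throughput controller.
 */
public class ThroughputLimiterSketch {
    private final double bytesPerSecond;
    private final long startNanos = System.nanoTime();
    private long bytesWritten;
    private int sleeps;

    ThroughputLimiterSketch(double bytesPerSecond) {
        this.bytesPerSecond = bytesPerSecond;
    }

    /** Call after writing a chunk; sleeps if the budget for the elapsed time is exceeded. */
    void control(long chunkBytes) throws InterruptedException {
        bytesWritten += chunkBytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double allowed = bytesPerSecond * Math.max(elapsedSec, 1e-6);
        if (bytesWritten > allowed) {
            long sleepMs = (long) (((bytesWritten - allowed) / bytesPerSecond) * 1000);
            if (sleepMs > 0) {
                sleeps++;
                Thread.sleep(sleepMs);
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputLimiterSketch limiter = new ThroughputLimiterSketch(50 * 1024 * 1024); // 50 MB/s
        long written = 0;
        long start = System.nanoTime();
        for (int i = 0; i < 100; i++) {
            written += 1024 * 1024;            // pretend we wrote 1 MB of compaction output
            limiter.control(1024 * 1024);
        }
        double seconds = (System.nanoTime() - start) / 1e9;
        System.out.printf("average throughput is %.2f MB/second, slept %d time(s)%n",
                written / 1024.0 / 1024.0 / seconds, limiter.sleeps);
    }
}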
2024-11-14T09:30:07,227 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6e1713d974bb85bbc3af5b51982a72b5: 2024-11-14T09:30:07,230 INFO [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5., storeName=6e1713d974bb85bbc3af5b51982a72b5/info, priority=13, startTime=1731576606735; duration=0sec 2024-11-14T09:30:07,230 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-14T09:30:07,230 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:30:07,230 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/2ce29a64f38f4cb38e2536e75c0cc696 because midkey is the same as first or last row 2024-11-14T09:30:07,230 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-14T09:30:07,231 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:30:07,231 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/2ce29a64f38f4cb38e2536e75c0cc696 because midkey is the same as first or last row 2024-11-14T09:30:07,231 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-14T09:30:07,231 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:30:07,231 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/2ce29a64f38f4cb38e2536e75c0cc696 because midkey is the same as first or last row 2024-11-14T09:30:07,231 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:30:07,231 DEBUG [RS:0;83f56b55f2af:43205-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6e1713d974bb85bbc3af5b51982a72b5:info 2024-11-14T09:30:18,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43205 {}] regionserver.HRegion(8855): Flush requested on 6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:30:18,776 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6e1713d974bb85bbc3af5b51982a72b5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:30:18,784 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/440297a43d8147049f0ee4e95bb72209 is 1080, key is row0022/info:/1731576606750/Put/seqid=0 2024-11-14T09:30:18,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741849_1025 (size=12509) 2024-11-14T09:30:18,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741849_1025 (size=12509) 2024-11-14T09:30:18,800 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/440297a43d8147049f0ee4e95bb72209 2024-11-14T09:30:18,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/440297a43d8147049f0ee4e95bb72209 as hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/440297a43d8147049f0ee4e95bb72209 2024-11-14T09:30:18,827 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/440297a43d8147049f0ee4e95bb72209, entries=7, sequenceid=42, filesize=12.2 K 2024-11-14T09:30:18,828 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 6e1713d974bb85bbc3af5b51982a72b5 in 52ms, sequenceid=42, compaction requested=false 2024-11-14T09:30:18,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6e1713d974bb85bbc3af5b51982a72b5: 2024-11-14T09:30:18,829 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-14T09:30:18,829 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:30:18,829 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/2ce29a64f38f4cb38e2536e75c0cc696 because midkey is the same as first or last row 2024-11-14T09:30:20,426 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
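Note: the split decision logged above has two parts — a size test (sumSize=39.3 K against sizeToCheck=16.0 K, where the threshold appears to scale with regionsWithCommonTable) and a guard that refuses to split when the midkey equals the first or last row, since such a split point would produce an empty daughter region. The sketch below models both checks; the cube-growth formula and the constants are assumptions chosen to be consistent with the numbers in this log, not a definitive statement of the split policy.

import java.util.List;

/**
 * Sketch of a size-based split check plus the midkey guard seen in the log.
 * Names, constants, and the growth formula are assumptions for illustration.
 */
public class SplitDecisionSketch {
    static final long INITIAL_SIZE = 16 * 1024;              // 16 K, matching the test's tiny threshold
    static final long MAX_FILE_SIZE = 10L * 1024 * 1024 * 1024;

    /** Threshold grows with the number of regions the table already has on this server. */
    static long sizeToCheck(int regionsWithCommonTable) {
        if (regionsWithCommonTable == 0) return MAX_FILE_SIZE;
        long cubed = (long) regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
        return Math.min(MAX_FILE_SIZE, INITIAL_SIZE * cubed);
    }

    static boolean shouldSplit(long sumStoreSize, int regionsWithCommonTable,
                               String midKey, List<String> rows) {
        long threshold = sizeToCheck(regionsWithCommonTable);
        if (sumStoreSize <= threshold) return false;          // not big enough yet
        String first = rows.get(0), last = rows.get(rows.size() - 1);
        if (midKey.equals(first) || midKey.equals(last)) {
            System.out.println("cannot split because midkey is the same as first or last row");
            return false;
        }
        return true;
    }

    public static void main(String[] args) {
        List<String> rows = List.of("row0001", "row0002", "row0003");
        // ~39.3 K > 16 K, but the midkey collides with the first row, so no split happens.
        System.out.println(shouldSplit(40243, 1, "row0001", rows));
    }
}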
2024-11-14T09:30:24,634 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6e1713d974bb85bbc3af5b51982a72b5, had cached 0 bytes from a total of 40219 2024-11-14T09:30:26,787 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T09:30:26,787 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T09:30:26,788 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 
2024-11-14T09:30:26,793 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:30:26,793 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:30:26,793 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T09:30:26,793 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T09:30:26,793 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1767583037, stopped=false 2024-11-14T09:30:26,794 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=83f56b55f2af,41035,1731576531378 2024-11-14T09:30:26,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:30:26,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:30:26,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:26,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:26,796 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:30:26,796 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T09:30:26,796 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:30:26,796 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:30:26,797 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:30:26,797 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:30:26,797 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '83f56b55f2af,43205,1731576532097' ***** 2024-11-14T09:30:26,797 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T09:30:26,798 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T09:30:26,798 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T09:30:26,798 INFO [RS:0;83f56b55f2af:43205 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T09:30:26,798 INFO [RS:0;83f56b55f2af:43205 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T09:30:26,798 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer(3091): Received CLOSE for 6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:30:26,799 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer(959): stopping server 83f56b55f2af,43205,1731576532097 2024-11-14T09:30:26,799 INFO [RS:0;83f56b55f2af:43205 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:30:26,799 INFO [RS:0;83f56b55f2af:43205 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;83f56b55f2af:43205. 2024-11-14T09:30:26,799 DEBUG [RS:0;83f56b55f2af:43205 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:30:26,799 DEBUG [RS:0;83f56b55f2af:43205 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:30:26,799 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-14T09:30:26,799 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6e1713d974bb85bbc3af5b51982a72b5, disabling compactions & flushes 2024-11-14T09:30:26,799 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T09:30:26,799 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T09:30:26,799 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. 2024-11-14T09:30:26,799 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. 2024-11-14T09:30:26,799 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T09:30:26,799 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. after waiting 0 ms 2024-11-14T09:30:26,800 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. 2024-11-14T09:30:26,800 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 6e1713d974bb85bbc3af5b51982a72b5 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-14T09:30:26,800 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T09:30:26,800 DEBUG [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 6e1713d974bb85bbc3af5b51982a72b5=TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5.} 2024-11-14T09:30:26,800 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:30:26,800 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:30:26,800 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:30:26,800 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:30:26,800 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:30:26,800 DEBUG [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6e1713d974bb85bbc3af5b51982a72b5 2024-11-14T09:30:26,800 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 
2024-11-14T09:30:26,806 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/9afbd9f09ef34ed299fa1aeb61503d54 is 1080, key is row0029/info:/1731576620778/Put/seqid=0 2024-11-14T09:30:26,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741850_1026 (size=8193) 2024-11-14T09:30:26,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741850_1026 (size=8193) 2024-11-14T09:30:26,816 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/9afbd9f09ef34ed299fa1aeb61503d54 2024-11-14T09:30:26,826 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/.tmp/info/9afbd9f09ef34ed299fa1aeb61503d54 as hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/9afbd9f09ef34ed299fa1aeb61503d54 2024-11-14T09:30:26,827 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/.tmp/info/09cd43aa49f842b2b9e754ae37eab83d is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5./info:regioninfo/1731576534662/Put/seqid=0 2024-11-14T09:30:26,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741851_1027 (size=7016) 2024-11-14T09:30:26,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741851_1027 (size=7016) 2024-11-14T09:30:26,834 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/.tmp/info/09cd43aa49f842b2b9e754ae37eab83d 2024-11-14T09:30:26,835 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/9afbd9f09ef34ed299fa1aeb61503d54, entries=3, sequenceid=48, filesize=8.0 K 2024-11-14T09:30:26,837 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, 
heapSize ~3.61 KB/3696, currentSize=0 B/0 for 6e1713d974bb85bbc3af5b51982a72b5 in 37ms, sequenceid=48, compaction requested=true 2024-11-14T09:30:26,837 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/06d9882527b14a4981d08a27972e12fc, hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/1bcbbb9263114a7b8da44a50bacc79ca, hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/006c536584a945228a97e6a05ce1083d] to archive 2024-11-14T09:30:26,840 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T09:30:26,844 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/06d9882527b14a4981d08a27972e12fc to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/06d9882527b14a4981d08a27972e12fc 2024-11-14T09:30:26,846 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/1bcbbb9263114a7b8da44a50bacc79ca to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/1bcbbb9263114a7b8da44a50bacc79ca 2024-11-14T09:30:26,848 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/006c536584a945228a97e6a05ce1083d to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/archive/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/info/006c536584a945228a97e6a05ce1083d 2024-11-14T09:30:26,858 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/.tmp/ns/43f355fce53e44cd8eeb46fc411fab2f is 43, key is default/ns:d/1731576533962/Put/seqid=0 2024-11-14T09:30:26,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741852_1028 (size=5153) 2024-11-14T09:30:26,865 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741852_1028 (size=5153) 2024-11-14T09:30:26,860 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=83f56b55f2af:41035 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-14T09:30:26,865 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [06d9882527b14a4981d08a27972e12fc=12509, 1bcbbb9263114a7b8da44a50bacc79ca=12509, 006c536584a945228a97e6a05ce1083d=12509] 2024-11-14T09:30:26,865 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/.tmp/ns/43f355fce53e44cd8eeb46fc411fab2f 2024-11-14T09:30:26,871 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/default/TestLogRolling-testSlowSyncLogRolling/6e1713d974bb85bbc3af5b51982a72b5/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-14T09:30:26,873 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. 2024-11-14T09:30:26,873 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6e1713d974bb85bbc3af5b51982a72b5: Waiting for close lock at 1731576626799Running coprocessor pre-close hooks at 1731576626799Disabling compacts and flushes for region at 1731576626799Disabling writes for close at 1731576626799Obtaining lock to block concurrent updates at 1731576626800 (+1 ms)Preparing flush snapshotting stores in 6e1713d974bb85bbc3af5b51982a72b5 at 1731576626800Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731576626800Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. 
at 1731576626801 (+1 ms)Flushing 6e1713d974bb85bbc3af5b51982a72b5/info: creating writer at 1731576626801Flushing 6e1713d974bb85bbc3af5b51982a72b5/info: appending metadata at 1731576626805 (+4 ms)Flushing 6e1713d974bb85bbc3af5b51982a72b5/info: closing flushed file at 1731576626806 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29c04ffa: reopening flushed file at 1731576626824 (+18 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 6e1713d974bb85bbc3af5b51982a72b5 in 37ms, sequenceid=48, compaction requested=true at 1731576626837 (+13 ms)Writing region close event to WAL at 1731576626866 (+29 ms)Running coprocessor post-close hooks at 1731576626872 (+6 ms)Closed at 1731576626873 (+1 ms) 2024-11-14T09:30:26,874 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731576534206.6e1713d974bb85bbc3af5b51982a72b5. 2024-11-14T09:30:26,889 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/.tmp/table/1d38e4d37e2b4428a9709f6705d3cb28 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731576534681/Put/seqid=0 2024-11-14T09:30:26,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741853_1029 (size=5396) 2024-11-14T09:30:26,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741853_1029 (size=5396) 2024-11-14T09:30:26,895 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/.tmp/table/1d38e4d37e2b4428a9709f6705d3cb28 2024-11-14T09:30:26,903 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/.tmp/info/09cd43aa49f842b2b9e754ae37eab83d as hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/info/09cd43aa49f842b2b9e754ae37eab83d 2024-11-14T09:30:26,910 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/info/09cd43aa49f842b2b9e754ae37eab83d, entries=10, sequenceid=11, filesize=6.9 K 2024-11-14T09:30:26,911 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/.tmp/ns/43f355fce53e44cd8eeb46fc411fab2f as hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/ns/43f355fce53e44cd8eeb46fc411fab2f 2024-11-14T09:30:26,918 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/ns/43f355fce53e44cd8eeb46fc411fab2f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T09:30:26,919 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/.tmp/table/1d38e4d37e2b4428a9709f6705d3cb28 as hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/table/1d38e4d37e2b4428a9709f6705d3cb28 2024-11-14T09:30:26,926 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/table/1d38e4d37e2b4428a9709f6705d3cb28, entries=2, sequenceid=11, filesize=5.3 K 2024-11-14T09:30:26,928 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 128ms, sequenceid=11, compaction requested=false 2024-11-14T09:30:26,933 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T09:30:26,934 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:30:26,934 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:30:26,935 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731576626800Running coprocessor pre-close hooks at 1731576626800Disabling compacts and flushes for region at 1731576626800Disabling writes for close at 1731576626800Obtaining lock to block concurrent updates at 1731576626800Preparing flush snapshotting stores in 1588230740 at 1731576626800Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731576626801 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731576626802 (+1 ms)Flushing 1588230740/info: creating writer at 1731576626802Flushing 1588230740/info: appending metadata at 1731576626826 (+24 ms)Flushing 1588230740/info: closing flushed file at 1731576626826Flushing 1588230740/ns: creating writer at 1731576626842 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731576626858 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731576626858Flushing 1588230740/table: creating writer at 1731576626873 (+15 ms)Flushing 1588230740/table: appending metadata at 1731576626888 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731576626888Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@22c8f37e: reopening flushed file at 1731576626902 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f1f3f43: reopening flushed file at 1731576626911 (+9 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@563fda3b: reopening flushed file at 1731576626918 (+7 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 128ms, sequenceid=11, compaction requested=false at 1731576626928 (+10 ms)Writing region close event to WAL at 1731576626929 (+1 ms)Running coprocessor post-close hooks at 1731576626934 (+5 ms)Closed at 1731576626934 2024-11-14T09:30:26,935 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T09:30:27,001 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer(976): stopping server 83f56b55f2af,43205,1731576532097; all regions closed. 2024-11-14T09:30:27,002 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:27,002 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:27,002 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:27,003 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:27,003 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:27,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741834_1010 (size=3066) 2024-11-14T09:30:27,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741834_1010 (size=3066) 2024-11-14T09:30:27,009 DEBUG [RS:0;83f56b55f2af:43205 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/oldWALs 2024-11-14T09:30:27,009 INFO [RS:0;83f56b55f2af:43205 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 83f56b55f2af%2C43205%2C1731576532097.meta:.meta(num 1731576533813) 2024-11-14T09:30:27,010 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:27,010 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:27,010 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:27,010 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:27,010 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:27,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741847_1023 (size=12695) 2024-11-14T09:30:27,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741847_1023 (size=12695) 2024-11-14T09:30:27,016 DEBUG [RS:0;83f56b55f2af:43205 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/oldWALs 2024-11-14T09:30:27,016 INFO [RS:0;83f56b55f2af:43205 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 83f56b55f2af%2C43205%2C1731576532097:(num 1731576606748) 2024-11-14T09:30:27,016 DEBUG [RS:0;83f56b55f2af:43205 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:30:27,016 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:30:27,016 INFO [RS:0;83f56b55f2af:43205 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:30:27,016 INFO [RS:0;83f56b55f2af:43205 {}] hbase.ChoreService(370): Chore service for: regionserver/83f56b55f2af:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T09:30:27,017 INFO [RS:0;83f56b55f2af:43205 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:30:27,017 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:30:27,017 INFO [RS:0;83f56b55f2af:43205 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43205 2024-11-14T09:30:27,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/83f56b55f2af,43205,1731576532097 2024-11-14T09:30:27,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:30:27,021 INFO [RS:0;83f56b55f2af:43205 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:30:27,022 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [83f56b55f2af,43205,1731576532097] 2024-11-14T09:30:27,028 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/83f56b55f2af,43205,1731576532097 already deleted, retry=false 2024-11-14T09:30:27,028 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 83f56b55f2af,43205,1731576532097 expired; onlineServers=0 2024-11-14T09:30:27,028 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '83f56b55f2af,41035,1731576531378' ***** 2024-11-14T09:30:27,028 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T09:30:27,028 INFO [M:0;83f56b55f2af:41035 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:30:27,029 INFO [M:0;83f56b55f2af:41035 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:30:27,029 DEBUG [M:0;83f56b55f2af:41035 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T09:30:27,029 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T09:30:27,029 DEBUG [M:0;83f56b55f2af:41035 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T09:30:27,029 DEBUG [master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576533086 {}] cleaner.HFileCleaner(306): Exit Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576533086,5,FailOnTimeoutGroup] 2024-11-14T09:30:27,029 DEBUG [master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576533092 {}] cleaner.HFileCleaner(306): Exit Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576533092,5,FailOnTimeoutGroup] 2024-11-14T09:30:27,029 INFO [M:0;83f56b55f2af:41035 {}] hbase.ChoreService(370): Chore service for: master/83f56b55f2af:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T09:30:27,029 INFO [M:0;83f56b55f2af:41035 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:30:27,029 DEBUG [M:0;83f56b55f2af:41035 {}] master.HMaster(1795): Stopping service threads 2024-11-14T09:30:27,030 INFO [M:0;83f56b55f2af:41035 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T09:30:27,030 INFO [M:0;83f56b55f2af:41035 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:30:27,030 INFO [M:0;83f56b55f2af:41035 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T09:30:27,030 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T09:30:27,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T09:30:27,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:27,031 DEBUG [M:0;83f56b55f2af:41035 {}] zookeeper.ZKUtil(347): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T09:30:27,031 WARN [M:0;83f56b55f2af:41035 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T09:30:27,032 INFO [M:0;83f56b55f2af:41035 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/.lastflushedseqids 2024-11-14T09:30:27,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741854_1030 (size=130) 2024-11-14T09:30:27,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741854_1030 (size=130) 2024-11-14T09:30:27,045 INFO [M:0;83f56b55f2af:41035 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T09:30:27,045 INFO [M:0;83f56b55f2af:41035 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T09:30:27,045 DEBUG [M:0;83f56b55f2af:41035 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:30:27,045 INFO [M:0;83f56b55f2af:41035 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:30:27,046 DEBUG [M:0;83f56b55f2af:41035 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:30:27,046 DEBUG [M:0;83f56b55f2af:41035 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:30:27,046 DEBUG [M:0;83f56b55f2af:41035 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:30:27,046 INFO [M:0;83f56b55f2af:41035 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-14T09:30:27,065 DEBUG [M:0;83f56b55f2af:41035 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ea5c6f9db29e47709a04c260f22f855d is 82, key is hbase:meta,,1/info:regioninfo/1731576533887/Put/seqid=0 2024-11-14T09:30:27,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741855_1031 (size=5672) 2024-11-14T09:30:27,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741855_1031 (size=5672) 2024-11-14T09:30:27,072 INFO [M:0;83f56b55f2af:41035 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ea5c6f9db29e47709a04c260f22f855d 2024-11-14T09:30:27,097 DEBUG [M:0;83f56b55f2af:41035 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/153ea4a10d0849e6aa77d3356059c5a8 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731576534688/Put/seqid=0 2024-11-14T09:30:27,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741856_1032 (size=6247) 2024-11-14T09:30:27,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741856_1032 (size=6247) 2024-11-14T09:30:27,104 INFO [M:0;83f56b55f2af:41035 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/153ea4a10d0849e6aa77d3356059c5a8 2024-11-14T09:30:27,110 INFO [M:0;83f56b55f2af:41035 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 153ea4a10d0849e6aa77d3356059c5a8 2024-11-14T09:30:27,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:30:27,124 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43205-0x10115cef0ca0001, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:30:27,125 INFO [RS:0;83f56b55f2af:43205 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:30:27,125 INFO [RS:0;83f56b55f2af:43205 {}] regionserver.HRegionServer(1031): Exiting; stopping=83f56b55f2af,43205,1731576532097; zookeeper connection closed. 2024-11-14T09:30:27,125 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4f47cbb6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4f47cbb6 2024-11-14T09:30:27,126 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T09:30:27,127 DEBUG [M:0;83f56b55f2af:41035 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5cd6031cc01e4741945d6716a14a0963 is 69, key is 83f56b55f2af,43205,1731576532097/rs:state/1731576533137/Put/seqid=0 2024-11-14T09:30:27,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741857_1033 (size=5156) 2024-11-14T09:30:27,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741857_1033 (size=5156) 2024-11-14T09:30:27,133 INFO [M:0;83f56b55f2af:41035 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5cd6031cc01e4741945d6716a14a0963 2024-11-14T09:30:27,164 DEBUG [M:0;83f56b55f2af:41035 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5622321f28404c7e8ae7a4bbebe987f4 is 52, key is load_balancer_on/state:d/1731576534182/Put/seqid=0 2024-11-14T09:30:27,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741858_1034 (size=5056) 2024-11-14T09:30:27,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741858_1034 (size=5056) 2024-11-14T09:30:27,172 INFO [M:0;83f56b55f2af:41035 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5622321f28404c7e8ae7a4bbebe987f4 2024-11-14T09:30:27,180 DEBUG [M:0;83f56b55f2af:41035 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ea5c6f9db29e47709a04c260f22f855d as hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ea5c6f9db29e47709a04c260f22f855d 2024-11-14T09:30:27,187 INFO [M:0;83f56b55f2af:41035 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ea5c6f9db29e47709a04c260f22f855d, entries=8, sequenceid=59, filesize=5.5 K 2024-11-14T09:30:27,188 DEBUG [M:0;83f56b55f2af:41035 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/153ea4a10d0849e6aa77d3356059c5a8 as hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/153ea4a10d0849e6aa77d3356059c5a8 2024-11-14T09:30:27,196 INFO [M:0;83f56b55f2af:41035 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 153ea4a10d0849e6aa77d3356059c5a8 2024-11-14T09:30:27,196 INFO [M:0;83f56b55f2af:41035 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/153ea4a10d0849e6aa77d3356059c5a8, entries=6, sequenceid=59, filesize=6.1 K 2024-11-14T09:30:27,197 DEBUG [M:0;83f56b55f2af:41035 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5cd6031cc01e4741945d6716a14a0963 as hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5cd6031cc01e4741945d6716a14a0963 2024-11-14T09:30:27,204 INFO [M:0;83f56b55f2af:41035 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5cd6031cc01e4741945d6716a14a0963, entries=1, sequenceid=59, filesize=5.0 K 2024-11-14T09:30:27,205 DEBUG [M:0;83f56b55f2af:41035 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5622321f28404c7e8ae7a4bbebe987f4 as hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5622321f28404c7e8ae7a4bbebe987f4 2024-11-14T09:30:27,211 INFO [M:0;83f56b55f2af:41035 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5622321f28404c7e8ae7a4bbebe987f4, entries=1, sequenceid=59, filesize=4.9 K 2024-11-14T09:30:27,213 INFO [M:0;83f56b55f2af:41035 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=59, compaction requested=false 2024-11-14T09:30:27,215 INFO [M:0;83f56b55f2af:41035 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T09:30:27,215 DEBUG [M:0;83f56b55f2af:41035 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731576627045Disabling compacts and flushes for region at 1731576627045Disabling writes for close at 1731576627046 (+1 ms)Obtaining lock to block concurrent updates at 1731576627046Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731576627046Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731576627046Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731576627047 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731576627047Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731576627064 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731576627064Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731576627079 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731576627096 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731576627096Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731576627110 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731576627126 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731576627126Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731576627140 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731576627163 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731576627163Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6dff79b1: reopening flushed file at 1731576627178 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a2f56b3: reopening flushed file at 1731576627187 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8e3eea8: reopening flushed file at 1731576627196 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@783d8873: reopening flushed file at 1731576627204 (+8 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=59, compaction requested=false at 1731576627213 (+9 ms)Writing region close event to WAL at 1731576627214 (+1 ms)Closed at 1731576627214 2024-11-14T09:30:27,216 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:27,216 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:27,216 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:27,216 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:27,216 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:27,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33135 is added to blk_1073741830_1006 (size=27973) 2024-11-14T09:30:27,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32977 is added to blk_1073741830_1006 (size=27973) 2024-11-14T09:30:27,219 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T09:30:27,220 INFO [M:0;83f56b55f2af:41035 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T09:30:27,220 INFO [M:0;83f56b55f2af:41035 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41035 2024-11-14T09:30:27,220 INFO [M:0;83f56b55f2af:41035 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:30:27,223 INFO [regionserver/83f56b55f2af:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:30:27,329 INFO [M:0;83f56b55f2af:41035 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:30:27,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:30:27,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41035-0x10115cef0ca0000, quorum=127.0.0.1:56289, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:30:27,334 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@54b536b1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:30:27,336 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@59ce19fe{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:30:27,336 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:30:27,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b18aeba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:30:27,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6176039d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/hadoop.log.dir/,STOPPED} 2024-11-14T09:30:27,339 WARN [BP-742282143-172.17.0.2-1731576528106 heartbeating to localhost/127.0.0.1:33501 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:30:27,339 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:30:27,339 WARN [BP-742282143-172.17.0.2-1731576528106 heartbeating to localhost/127.0.0.1:33501 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-742282143-172.17.0.2-1731576528106 (Datanode Uuid 2d66a2d2-cf87-4ad6-acc4-ad404bbab6c3) service to localhost/127.0.0.1:33501 2024-11-14T09:30:27,339 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:30:27,340 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/cluster_ff8efdb6-df0f-5b8c-ec0f-5bdf2b3293cd/data/data3/current/BP-742282143-172.17.0.2-1731576528106 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:30:27,341 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/cluster_ff8efdb6-df0f-5b8c-ec0f-5bdf2b3293cd/data/data4/current/BP-742282143-172.17.0.2-1731576528106 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:30:27,341 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:30:27,343 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a47b0ed{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:30:27,344 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58445dea{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:30:27,344 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:30:27,344 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3fc5598e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:30:27,344 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74548cdc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/hadoop.log.dir/,STOPPED} 2024-11-14T09:30:27,346 WARN [BP-742282143-172.17.0.2-1731576528106 heartbeating to localhost/127.0.0.1:33501 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:30:27,346 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:30:27,346 WARN [BP-742282143-172.17.0.2-1731576528106 heartbeating to localhost/127.0.0.1:33501 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-742282143-172.17.0.2-1731576528106 (Datanode Uuid e705dbf7-6fba-4e42-9825-9befd79346a6) service to localhost/127.0.0.1:33501 2024-11-14T09:30:27,346 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:30:27,347 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/cluster_ff8efdb6-df0f-5b8c-ec0f-5bdf2b3293cd/data/data1/current/BP-742282143-172.17.0.2-1731576528106 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:30:27,347 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/cluster_ff8efdb6-df0f-5b8c-ec0f-5bdf2b3293cd/data/data2/current/BP-742282143-172.17.0.2-1731576528106 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:30:27,348 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:30:27,357 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c1a236c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:30:27,358 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a625720{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:30:27,358 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:30:27,358 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ae7f863{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:30:27,359 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c5202f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/hadoop.log.dir/,STOPPED} 2024-11-14T09:30:27,368 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T09:30:27,400 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T09:30:27,410 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:33501 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/83f56b55f2af:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:33501 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33501 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:33501 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33501 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/83f56b55f2af:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@373a7f2c java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/83f56b55f2af:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33501 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33501 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:33501 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=216 (was 260), ProcessCount=11 (was 11), AvailableMemoryMB=6794 (was 6968) 2024-11-14T09:30:27,416 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=216, ProcessCount=11, AvailableMemoryMB=6793 2024-11-14T09:30:27,417 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T09:30:27,417 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/hadoop.log.dir so I do NOT create it in target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b 2024-11-14T09:30:27,417 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6bf3454-f7e1-a565-c984-10ac7976def3/hadoop.tmp.dir so I do NOT create it in target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b 2024-11-14T09:30:27,417 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/cluster_a9841446-98d7-ba18-9cc3-25d53009acc9, deleteOnExit=true 2024-11-14T09:30:27,417 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T09:30:27,417 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/test.cache.data in system properties and HBase conf 2024-11-14T09:30:27,417 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T09:30:27,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/hadoop.log.dir in system properties and HBase conf 2024-11-14T09:30:27,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T09:30:27,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T09:30:27,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T09:30:27,418 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-14T09:30:27,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:30:27,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:30:27,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T09:30:27,418 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:30:27,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T09:30:27,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T09:30:27,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:30:27,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:30:27,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T09:30:27,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/nfs.dump.dir in system properties and HBase conf 2024-11-14T09:30:27,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/java.io.tmpdir in system properties and HBase conf 2024-11-14T09:30:27,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:30:27,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T09:30:27,419 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T09:30:27,434 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:30:27,518 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:30:27,524 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:30:27,525 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:30:27,525 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:30:27,525 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:30:27,526 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:30:27,527 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1490ab38{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:30:27,527 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19bf40f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:30:27,653 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@74d3eb32{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/java.io.tmpdir/jetty-localhost-44561-hadoop-hdfs-3_4_1-tests_jar-_-any-13832820142001933450/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:30:27,654 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@245042fb{HTTP/1.1, (http/1.1)}{localhost:44561} 2024-11-14T09:30:27,654 INFO [Time-limited test {}] server.Server(415): Started @101840ms 2024-11-14T09:30:27,669 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:30:27,746 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:30:27,750 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:30:27,750 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:30:27,750 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:30:27,750 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:30:27,751 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35b4906c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:30:27,751 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@232ba44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:30:27,866 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b1bb178{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/java.io.tmpdir/jetty-localhost-45061-hadoop-hdfs-3_4_1-tests_jar-_-any-2782462981787457778/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:30:27,867 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1806f1a6{HTTP/1.1, (http/1.1)}{localhost:45061} 2024-11-14T09:30:27,867 INFO [Time-limited test {}] server.Server(415): Started @102053ms 2024-11-14T09:30:27,869 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:30:27,908 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:30:27,912 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:30:27,913 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:30:27,913 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:30:27,913 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:30:27,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@38d929c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:30:27,914 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74fcc9e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:30:27,988 WARN [Thread-436 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/cluster_a9841446-98d7-ba18-9cc3-25d53009acc9/data/data1/current/BP-1828366861-172.17.0.2-1731576627453/current, will proceed with Du for space computation calculation, 2024-11-14T09:30:27,988 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/cluster_a9841446-98d7-ba18-9cc3-25d53009acc9/data/data2/current/BP-1828366861-172.17.0.2-1731576627453/current, will proceed with Du for space computation calculation, 2024-11-14T09:30:28,012 WARN [Thread-415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:30:28,015 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6996284b823f2f93 with lease ID 0x322f9a700b962225: Processing first storage report for DS-bb99eddc-da9a-495e-ab79-d61da3d20148 from datanode DatanodeRegistration(127.0.0.1:42707, datanodeUuid=129d72b2-8933-40ae-9c0a-264da67edadc, infoPort=39973, infoSecurePort=0, ipcPort=33327, storageInfo=lv=-57;cid=testClusterID;nsid=1787999843;c=1731576627453) 2024-11-14T09:30:28,015 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6996284b823f2f93 with lease ID 0x322f9a700b962225: from storage DS-bb99eddc-da9a-495e-ab79-d61da3d20148 node DatanodeRegistration(127.0.0.1:42707, datanodeUuid=129d72b2-8933-40ae-9c0a-264da67edadc, infoPort=39973, infoSecurePort=0, ipcPort=33327, storageInfo=lv=-57;cid=testClusterID;nsid=1787999843;c=1731576627453), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:30:28,016 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6996284b823f2f93 with lease ID 0x322f9a700b962225: Processing first storage report for DS-ed05eece-a4c6-4058-88be-1810f1a4aa23 from datanode DatanodeRegistration(127.0.0.1:42707, datanodeUuid=129d72b2-8933-40ae-9c0a-264da67edadc, infoPort=39973, infoSecurePort=0, ipcPort=33327, storageInfo=lv=-57;cid=testClusterID;nsid=1787999843;c=1731576627453) 2024-11-14T09:30:28,016 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6996284b823f2f93 with lease ID 0x322f9a700b962225: from storage DS-ed05eece-a4c6-4058-88be-1810f1a4aa23 node DatanodeRegistration(127.0.0.1:42707, datanodeUuid=129d72b2-8933-40ae-9c0a-264da67edadc, infoPort=39973, infoSecurePort=0, ipcPort=33327, storageInfo=lv=-57;cid=testClusterID;nsid=1787999843;c=1731576627453), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:30:28,045 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3739906{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/java.io.tmpdir/jetty-localhost-41751-hadoop-hdfs-3_4_1-tests_jar-_-any-17537896478579371297/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:30:28,045 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b4297b9{HTTP/1.1, (http/1.1)}{localhost:41751} 2024-11-14T09:30:28,045 INFO [Time-limited test {}] server.Server(415): Started @102232ms 2024-11-14T09:30:28,048 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
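The entries above record the test's in-process HDFS coming up: a NameNode web UI on Jetty, two DataNodes registering their storages, and their first block reports being processed. For orientation only, here is a minimal Java sketch of how a test usually drives this stage; it assumes that HBaseTestingUtil on branch-3 still exposes the startMiniDFSCluster/getDFSCluster/shutdownMiniDFSCluster methods known from the older HBaseTestingUtility, and it is not the code behind this particular run.

import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Starts an in-process NameNode plus two DataNodes, which is what produces
    // the Jetty "Started Server" lines and the BLOCK* processReport entries above.
    util.startMiniDFSCluster(2);
    try {
      System.out.println("mini DFS at " + util.getDFSCluster().getFileSystem().getUri());
    } finally {
      util.shutdownMiniDFSCluster();
    }
  }
}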
2024-11-14T09:30:28,150 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/cluster_a9841446-98d7-ba18-9cc3-25d53009acc9/data/data4/current/BP-1828366861-172.17.0.2-1731576627453/current, will proceed with Du for space computation calculation, 2024-11-14T09:30:28,150 WARN [Thread-462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/cluster_a9841446-98d7-ba18-9cc3-25d53009acc9/data/data3/current/BP-1828366861-172.17.0.2-1731576627453/current, will proceed with Du for space computation calculation, 2024-11-14T09:30:28,174 WARN [Thread-451 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:30:28,176 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1aae2aa6e02dd56 with lease ID 0x322f9a700b962226: Processing first storage report for DS-54172244-49e9-4758-bf0e-de2934bfceb7 from datanode DatanodeRegistration(127.0.0.1:37071, datanodeUuid=fbf0f084-44e0-4e27-8b7f-2b0ba08ddafe, infoPort=44275, infoSecurePort=0, ipcPort=39929, storageInfo=lv=-57;cid=testClusterID;nsid=1787999843;c=1731576627453) 2024-11-14T09:30:28,177 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1aae2aa6e02dd56 with lease ID 0x322f9a700b962226: from storage DS-54172244-49e9-4758-bf0e-de2934bfceb7 node DatanodeRegistration(127.0.0.1:37071, datanodeUuid=fbf0f084-44e0-4e27-8b7f-2b0ba08ddafe, infoPort=44275, infoSecurePort=0, ipcPort=39929, storageInfo=lv=-57;cid=testClusterID;nsid=1787999843;c=1731576627453), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T09:30:28,177 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1aae2aa6e02dd56 with lease ID 0x322f9a700b962226: Processing first storage report for DS-640a13e0-7d03-4d31-badf-7493b929f078 from datanode DatanodeRegistration(127.0.0.1:37071, datanodeUuid=fbf0f084-44e0-4e27-8b7f-2b0ba08ddafe, infoPort=44275, infoSecurePort=0, ipcPort=39929, storageInfo=lv=-57;cid=testClusterID;nsid=1787999843;c=1731576627453) 2024-11-14T09:30:28,177 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1aae2aa6e02dd56 with lease ID 0x322f9a700b962226: from storage DS-640a13e0-7d03-4d31-badf-7493b929f078 node DatanodeRegistration(127.0.0.1:37071, datanodeUuid=fbf0f084-44e0-4e27-8b7f-2b0ba08ddafe, infoPort=44275, infoSecurePort=0, ipcPort=39929, storageInfo=lv=-57;cid=testClusterID;nsid=1787999843;c=1731576627453), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:30:28,278 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b 2024-11-14T09:30:28,282 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/cluster_a9841446-98d7-ba18-9cc3-25d53009acc9/zookeeper_0, clientPort=62495, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/cluster_a9841446-98d7-ba18-9cc3-25d53009acc9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/cluster_a9841446-98d7-ba18-9cc3-25d53009acc9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T09:30:28,283 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62495 2024-11-14T09:30:28,284 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:28,285 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:28,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:30:28,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:30:28,299 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824 with version=8 2024-11-14T09:30:28,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/hbase-staging 2024-11-14T09:30:28,301 INFO [Time-limited test {}] client.ConnectionUtils(128): master/83f56b55f2af:0 server-side Connection retries=45 2024-11-14T09:30:28,302 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:30:28,302 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:30:28,302 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:30:28,302 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:30:28,302 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:30:28,302 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T09:30:28,302 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:30:28,303 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34157 2024-11-14T09:30:28,305 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34157 connecting to ZooKeeper ensemble=127.0.0.1:62495 2024-11-14T09:30:28,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:341570x0, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:30:28,314 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34157-0x10115d06ec80000 connected 2024-11-14T09:30:28,337 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:28,340 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:28,349 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:30:28,349 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824, hbase.cluster.distributed=false 2024-11-14T09:30:28,352 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:30:28,354 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34157 2024-11-14T09:30:28,356 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34157 2024-11-14T09:30:28,360 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34157 2024-11-14T09:30:28,362 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34157 2024-11-14T09:30:28,362 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34157 2024-11-14T09:30:28,381 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/83f56b55f2af:0 server-side Connection retries=45 2024-11-14T09:30:28,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:30:28,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:30:28,381 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:30:28,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:30:28,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:30:28,381 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T09:30:28,382 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:30:28,382 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44641 2024-11-14T09:30:28,384 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44641 connecting to ZooKeeper ensemble=127.0.0.1:62495 2024-11-14T09:30:28,385 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:28,387 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:28,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446410x0, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:30:28,397 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:446410x0, quorum=127.0.0.1:62495, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:30:28,398 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T09:30:28,400 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44641-0x10115d06ec80001 connected 2024-11-14T09:30:28,406 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T09:30:28,407 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T09:30:28,409 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:30:28,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44641 2024-11-14T09:30:28,416 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44641 2024-11-14T09:30:28,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44641 2024-11-14T09:30:28,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44641 2024-11-14T09:30:28,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44641 2024-11-14T09:30:28,439 
DEBUG [M:0;83f56b55f2af:34157 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;83f56b55f2af:34157 2024-11-14T09:30:28,440 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/83f56b55f2af,34157,1731576628301 2024-11-14T09:30:28,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:30:28,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:30:28,443 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/83f56b55f2af,34157,1731576628301 2024-11-14T09:30:28,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T09:30:28,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:28,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:28,449 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T09:30:28,450 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/83f56b55f2af,34157,1731576628301 from backup master directory 2024-11-14T09:30:28,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/83f56b55f2af,34157,1731576628301 2024-11-14T09:30:28,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:30:28,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:30:28,453 WARN [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-14T09:30:28,453 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=83f56b55f2af,34157,1731576628301 2024-11-14T09:30:28,460 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/hbase.id] with ID: 07a9f990-4986-4dc7-a979-d48283a3a68d 2024-11-14T09:30:28,460 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/.tmp/hbase.id 2024-11-14T09:30:28,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:30:28,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:30:28,481 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/.tmp/hbase.id]:[hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/hbase.id] 2024-11-14T09:30:28,503 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:28,503 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T09:30:28,505 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
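From 09:30:28,282 onward the log shows the rest of the mini cluster being wired up: a MiniZooKeeperCluster on clientPort=62495, an HMaster bound to port 34157, a RegionServer bound to port 44641, the active-master registration under /hbase/backup-masters, and the cluster ID file written to HDFS. The sketch below shows the single call that normally drives all of this from a JUnit-style test; it assumes HBaseTestingUtil keeps the startMiniCluster/createTable/shutdownMiniCluster API of HBaseTestingUtility and is only an illustration, not the test that produced this output.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // One call brings up ZooKeeper, HDFS, one HMaster and one RegionServer --
    // the same components whose startup the surrounding entries record.
    util.startMiniCluster();
    try {
      Table table = util.createTable(TableName.valueOf("demo"), Bytes.toBytes("cf"));
      table.put(new Put(Bytes.toBytes("r1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    } finally {
      util.shutdownMiniCluster();
    }
  }
}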
2024-11-14T09:30:28,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:28,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:28,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:30:28,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:30:28,924 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:30:28,926 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T09:30:28,926 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:30:28,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:30:28,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:30:29,348 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store 2024-11-14T09:30:29,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:30:29,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:30:29,366 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:30:29,367 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:30:29,367 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:30:29,367 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:30:29,367 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:30:29,367 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:30:29,367 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
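The master:store descriptor logged above spells out each column family's attributes (info, proc, rs and state with their VERSIONS, DATA_BLOCK_ENCODING, BLOOMFILTER, IN_MEMORY and BLOCKSIZE settings). As a reading aid, the sketch below maps the logged 'info' family onto HBase's public descriptor-builder API; it is an equivalent reconstruction for illustration only, not the internal code that creates the master local region.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                  // VERSIONS => '3'
            .setInMemory(true)                                  // IN_MEMORY => 'true'
            .setBlocksize(8192)                                 // BLOCKSIZE => '8192 B (8KB)'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)               // BLOOMFILTER => 'ROWCOL'
            .build())
        .build();
    System.out.println(td);
  }
}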
2024-11-14T09:30:29,367 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731576629367Disabling compacts and flushes for region at 1731576629367Disabling writes for close at 1731576629367Writing region close event to WAL at 1731576629367Closed at 1731576629367 2024-11-14T09:30:29,369 WARN [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/.initializing 2024-11-14T09:30:29,369 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/WALs/83f56b55f2af,34157,1731576628301 2024-11-14T09:30:29,373 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C34157%2C1731576628301, suffix=, logDir=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/WALs/83f56b55f2af,34157,1731576628301, archiveDir=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/oldWALs, maxLogs=10 2024-11-14T09:30:29,373 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C34157%2C1731576628301.1731576629373 2024-11-14T09:30:29,386 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/WALs/83f56b55f2af,34157,1731576628301/83f56b55f2af%2C34157%2C1731576628301.1731576629373 2024-11-14T09:30:29,390 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39973:39973),(127.0.0.1/127.0.0.1:44275:44275)] 2024-11-14T09:30:29,393 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:30:29,393 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:30:29,394 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:29,394 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:29,396 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:29,398 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T09:30:29,398 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:29,399 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:29,399 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:29,401 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T09:30:29,401 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:29,403 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:30:29,403 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:29,406 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T09:30:29,406 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:29,407 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:30:29,407 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:29,408 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T09:30:29,409 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:29,409 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:30:29,409 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:29,410 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:29,411 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:29,413 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:29,413 DEBUG [master/83f56b55f2af:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:29,414 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T09:30:29,415 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:29,418 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:30:29,419 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=736266, jitterRate=-0.0637902319431305}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T09:30:29,421 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731576629394Initializing all the Stores at 1731576629395 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576629396 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576629396Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576629396Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576629396Cleaning up temporary data from old regions at 1731576629413 (+17 ms)Region opened successfully at 1731576629420 (+7 ms) 2024-11-14T09:30:29,424 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T09:30:29,429 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c37a82e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83f56b55f2af/172.17.0.2:0 2024-11-14T09:30:29,430 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T09:30:29,430 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T09:30:29,430 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T09:30:29,431 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T09:30:29,431 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T09:30:29,432 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T09:30:29,432 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T09:30:29,434 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T09:30:29,435 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T09:30:29,436 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T09:30:29,437 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T09:30:29,437 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T09:30:29,440 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T09:30:29,440 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T09:30:29,441 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T09:30:29,443 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T09:30:29,443 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T09:30:29,445 DEBUG 
[master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T09:30:29,447 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T09:30:29,448 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T09:30:29,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:30:29,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:29,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:30:29,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:29,455 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=83f56b55f2af,34157,1731576628301, sessionid=0x10115d06ec80000, setting cluster-up flag (Was=false) 2024-11-14T09:30:29,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:29,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:29,464 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T09:30:29,465 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=83f56b55f2af,34157,1731576628301 2024-11-14T09:30:29,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:29,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:29,475 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T09:30:29,476 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=83f56b55f2af,34157,1731576628301 2024-11-14T09:30:29,477 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T09:30:29,479 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T09:30:29,480 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T09:30:29,480 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T09:30:29,480 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 83f56b55f2af,34157,1731576628301 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T09:30:29,481 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:30:29,482 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:30:29,482 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:30:29,482 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:30:29,482 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/83f56b55f2af:0, corePoolSize=10, maxPoolSize=10 2024-11-14T09:30:29,482 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:29,482 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/83f56b55f2af:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:30:29,482 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/83f56b55f2af:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T09:30:29,483 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731576659483 2024-11-14T09:30:29,483 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T09:30:29,483 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T09:30:29,483 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T09:30:29,483 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T09:30:29,483 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T09:30:29,483 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T09:30:29,483 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:29,484 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T09:30:29,484 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T09:30:29,484 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T09:30:29,484 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T09:30:29,484 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T09:30:29,484 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:30:29,484 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T09:30:29,484 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576629484,5,FailOnTimeoutGroup] 2024-11-14T09:30:29,485 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576629484,5,FailOnTimeoutGroup] 2024-11-14T09:30:29,485 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:29,485 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T09:30:29,485 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:29,485 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:29,486 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:29,486 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T09:30:29,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:30:29,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:30:29,494 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T09:30:29,494 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824 2024-11-14T09:30:29,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:30:29,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:30:29,503 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:30:29,504 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:30:29,506 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:30:29,506 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:29,507 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:29,507 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:30:29,508 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:30:29,508 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:29,509 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:29,509 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:30:29,511 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:30:29,511 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:29,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:29,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:30:29,513 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:30:29,513 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:29,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:29,514 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:30:29,515 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/data/hbase/meta/1588230740 2024-11-14T09:30:29,515 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/data/hbase/meta/1588230740 2024-11-14T09:30:29,516 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:30:29,516 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:30:29,517 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-14T09:30:29,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:30:29,521 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:30:29,522 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=751169, jitterRate=-0.044839322566986084}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:30:29,523 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731576629503Initializing all the Stores at 1731576629504 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576629504Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576629504Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576629504Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576629504Cleaning up temporary data from old regions at 1731576629516 (+12 ms)Region opened successfully at 1731576629523 (+7 ms) 2024-11-14T09:30:29,524 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:30:29,524 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:30:29,524 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:30:29,524 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:30:29,524 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:30:29,524 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:30:29,524 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731576629523Disabling compacts and flushes for region at 1731576629523Disabling writes for close at 1731576629524 (+1 ms)Writing 
region close event to WAL at 1731576629524Closed at 1731576629524 2024-11-14T09:30:29,526 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:30:29,526 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T09:30:29,527 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T09:30:29,528 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.HRegionServer(746): ClusterId : 07a9f990-4986-4dc7-a979-d48283a3a68d 2024-11-14T09:30:29,528 DEBUG [RS:0;83f56b55f2af:44641 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T09:30:29,528 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:30:29,530 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T09:30:29,531 DEBUG [RS:0;83f56b55f2af:44641 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T09:30:29,531 DEBUG [RS:0;83f56b55f2af:44641 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T09:30:29,539 DEBUG [RS:0;83f56b55f2af:44641 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T09:30:29,540 DEBUG [RS:0;83f56b55f2af:44641 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f60118f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83f56b55f2af/172.17.0.2:0 2024-11-14T09:30:29,553 DEBUG [RS:0;83f56b55f2af:44641 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;83f56b55f2af:44641 2024-11-14T09:30:29,553 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T09:30:29,553 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T09:30:29,553 DEBUG [RS:0;83f56b55f2af:44641 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-14T09:30:29,554 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.HRegionServer(2659): reportForDuty to master=83f56b55f2af,34157,1731576628301 with port=44641, startcode=1731576628380 2024-11-14T09:30:29,555 DEBUG [RS:0;83f56b55f2af:44641 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T09:30:29,557 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53741, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T09:30:29,558 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34157 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 83f56b55f2af,44641,1731576628380 2024-11-14T09:30:29,558 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34157 {}] master.ServerManager(517): Registering regionserver=83f56b55f2af,44641,1731576628380 2024-11-14T09:30:29,560 DEBUG [RS:0;83f56b55f2af:44641 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824 2024-11-14T09:30:29,560 DEBUG [RS:0;83f56b55f2af:44641 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43643 2024-11-14T09:30:29,560 DEBUG [RS:0;83f56b55f2af:44641 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T09:30:29,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:30:29,563 DEBUG [RS:0;83f56b55f2af:44641 {}] zookeeper.ZKUtil(111): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/83f56b55f2af,44641,1731576628380 2024-11-14T09:30:29,563 WARN [RS:0;83f56b55f2af:44641 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:30:29,563 INFO [RS:0;83f56b55f2af:44641 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:30:29,563 DEBUG [RS:0;83f56b55f2af:44641 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/WALs/83f56b55f2af,44641,1731576628380 2024-11-14T09:30:29,563 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [83f56b55f2af,44641,1731576628380] 2024-11-14T09:30:29,567 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T09:30:29,569 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T09:30:29,572 INFO [RS:0;83f56b55f2af:44641 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:30:29,572 INFO [RS:0;83f56b55f2af:44641 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-14T09:30:29,573 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T09:30:29,574 INFO [RS:0;83f56b55f2af:44641 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T09:30:29,574 INFO [RS:0;83f56b55f2af:44641 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:29,574 DEBUG [RS:0;83f56b55f2af:44641 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:29,574 DEBUG [RS:0;83f56b55f2af:44641 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:29,574 DEBUG [RS:0;83f56b55f2af:44641 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:29,574 DEBUG [RS:0;83f56b55f2af:44641 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:29,574 DEBUG [RS:0;83f56b55f2af:44641 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:29,574 DEBUG [RS:0;83f56b55f2af:44641 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/83f56b55f2af:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:30:29,574 DEBUG [RS:0;83f56b55f2af:44641 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:29,574 DEBUG [RS:0;83f56b55f2af:44641 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:29,574 DEBUG [RS:0;83f56b55f2af:44641 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:29,574 DEBUG [RS:0;83f56b55f2af:44641 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:29,574 DEBUG [RS:0;83f56b55f2af:44641 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:29,574 DEBUG [RS:0;83f56b55f2af:44641 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:29,574 DEBUG [RS:0;83f56b55f2af:44641 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/83f56b55f2af:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:30:29,574 DEBUG [RS:0;83f56b55f2af:44641 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:30:29,575 INFO [RS:0;83f56b55f2af:44641 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-14T09:30:29,575 INFO [RS:0;83f56b55f2af:44641 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:29,575 INFO [RS:0;83f56b55f2af:44641 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:29,575 INFO [RS:0;83f56b55f2af:44641 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:29,576 INFO [RS:0;83f56b55f2af:44641 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:29,576 INFO [RS:0;83f56b55f2af:44641 {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,44641,1731576628380-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:30:29,592 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T09:30:29,592 INFO [RS:0;83f56b55f2af:44641 {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,44641,1731576628380-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:29,592 INFO [RS:0;83f56b55f2af:44641 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:29,592 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.Replication(171): 83f56b55f2af,44641,1731576628380 started 2024-11-14T09:30:29,608 INFO [RS:0;83f56b55f2af:44641 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:29,608 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.HRegionServer(1482): Serving as 83f56b55f2af,44641,1731576628380, RpcServer on 83f56b55f2af/172.17.0.2:44641, sessionid=0x10115d06ec80001 2024-11-14T09:30:29,608 DEBUG [RS:0;83f56b55f2af:44641 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T09:30:29,608 DEBUG [RS:0;83f56b55f2af:44641 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 83f56b55f2af,44641,1731576628380 2024-11-14T09:30:29,608 DEBUG [RS:0;83f56b55f2af:44641 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83f56b55f2af,44641,1731576628380' 2024-11-14T09:30:29,608 DEBUG [RS:0;83f56b55f2af:44641 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T09:30:29,609 DEBUG [RS:0;83f56b55f2af:44641 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T09:30:29,609 DEBUG [RS:0;83f56b55f2af:44641 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T09:30:29,609 DEBUG [RS:0;83f56b55f2af:44641 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T09:30:29,609 DEBUG [RS:0;83f56b55f2af:44641 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 83f56b55f2af,44641,1731576628380 2024-11-14T09:30:29,609 DEBUG [RS:0;83f56b55f2af:44641 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83f56b55f2af,44641,1731576628380' 2024-11-14T09:30:29,610 DEBUG [RS:0;83f56b55f2af:44641 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T09:30:29,610 DEBUG 
[RS:0;83f56b55f2af:44641 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T09:30:29,610 DEBUG [RS:0;83f56b55f2af:44641 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T09:30:29,610 INFO [RS:0;83f56b55f2af:44641 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T09:30:29,610 INFO [RS:0;83f56b55f2af:44641 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T09:30:29,680 WARN [83f56b55f2af:34157 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T09:30:29,713 INFO [RS:0;83f56b55f2af:44641 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C44641%2C1731576628380, suffix=, logDir=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/WALs/83f56b55f2af,44641,1731576628380, archiveDir=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/oldWALs, maxLogs=32 2024-11-14T09:30:29,715 INFO [RS:0;83f56b55f2af:44641 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44641%2C1731576628380.1731576629715 2024-11-14T09:30:29,722 INFO [RS:0;83f56b55f2af:44641 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/WALs/83f56b55f2af,44641,1731576628380/83f56b55f2af%2C44641%2C1731576628380.1731576629715 2024-11-14T09:30:29,723 DEBUG [RS:0;83f56b55f2af:44641 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39973:39973),(127.0.0.1/127.0.0.1:44275:44275)] 2024-11-14T09:30:29,931 DEBUG [83f56b55f2af:34157 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T09:30:29,931 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=83f56b55f2af,44641,1731576628380 2024-11-14T09:30:29,933 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 83f56b55f2af,44641,1731576628380, state=OPENING 2024-11-14T09:30:29,935 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T09:30:29,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:29,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:29,937 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:30:29,937 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:30:29,937 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:30:29,937 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=83f56b55f2af,44641,1731576628380}] 2024-11-14T09:30:30,091 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T09:30:30,094 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41233, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T09:30:30,098 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T09:30:30,098 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:30:30,100 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C44641%2C1731576628380.meta, suffix=.meta, logDir=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/WALs/83f56b55f2af,44641,1731576628380, archiveDir=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/oldWALs, maxLogs=32 2024-11-14T09:30:30,102 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44641%2C1731576628380.meta.1731576630102.meta 2024-11-14T09:30:30,107 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/WALs/83f56b55f2af,44641,1731576628380/83f56b55f2af%2C44641%2C1731576628380.meta.1731576630102.meta 2024-11-14T09:30:30,108 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44275:44275),(127.0.0.1/127.0.0.1:39973:39973)] 2024-11-14T09:30:30,109 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:30:30,110 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T09:30:30,110 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T09:30:30,110 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-14T09:30:30,110 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T09:30:30,110 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:30:30,110 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T09:30:30,110 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T09:30:30,112 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:30:30,113 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:30:30,113 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:30,113 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:30,113 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:30:30,114 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:30:30,114 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:30,115 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:30,115 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:30:30,116 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:30:30,116 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:30,116 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:30,117 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:30:30,117 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:30:30,118 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:30,118 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-14T09:30:30,118 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:30:30,119 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/data/hbase/meta/1588230740 2024-11-14T09:30:30,120 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/data/hbase/meta/1588230740 2024-11-14T09:30:30,122 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:30:30,122 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:30:30,123 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T09:30:30,124 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:30:30,125 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=826217, jitterRate=0.05059035122394562}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:30:30,125 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T09:30:30,126 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731576630110Writing region info on filesystem at 1731576630110Initializing all the Stores at 1731576630111 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576630112 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576630112Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576630112Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576630112Cleaning up temporary data from old regions at 1731576630122 (+10 ms)Running coprocessor post-open hooks at 1731576630125 (+3 ms)Region opened successfully at 1731576630126 (+1 ms) 2024-11-14T09:30:30,127 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731576630091 2024-11-14T09:30:30,131 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T09:30:30,131 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T09:30:30,132 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=83f56b55f2af,44641,1731576628380 2024-11-14T09:30:30,133 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 83f56b55f2af,44641,1731576628380, state=OPEN 2024-11-14T09:30:30,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:30:30,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:30:30,140 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=83f56b55f2af,44641,1731576628380 2024-11-14T09:30:30,140 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:30:30,140 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:30:30,143 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T09:30:30,144 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=83f56b55f2af,44641,1731576628380 in 203 msec 2024-11-14T09:30:30,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T09:30:30,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 618 msec 2024-11-14T09:30:30,148 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:30:30,148 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T09:30:30,150 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:30:30,151 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=83f56b55f2af,44641,1731576628380, seqNum=-1] 2024-11-14T09:30:30,151 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:30:30,153 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60923, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:30:30,160 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 679 msec 2024-11-14T09:30:30,160 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731576630160, completionTime=-1 2024-11-14T09:30:30,160 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T09:30:30,160 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T09:30:30,162 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T09:30:30,162 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731576690162 2024-11-14T09:30:30,162 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731576750162 2024-11-14T09:30:30,162 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-14T09:30:30,162 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,34157,1731576628301-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:30,162 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,34157,1731576628301-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:30,162 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,34157,1731576628301-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:30,162 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-83f56b55f2af:34157, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T09:30:30,163 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:30,163 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:30,165 DEBUG [master/83f56b55f2af:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T09:30:30,167 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.714sec 2024-11-14T09:30:30,167 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T09:30:30,167 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T09:30:30,168 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T09:30:30,168 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T09:30:30,168 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T09:30:30,168 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,34157,1731576628301-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:30:30,168 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,34157,1731576628301-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T09:30:30,171 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T09:30:30,171 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T09:30:30,171 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,34157,1731576628301-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
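The ChoreService entries above register the master's periodic background tasks (balancer, catalog janitor, HbckChore, MOB chores, and so on). Purely as an illustration of the mechanism those lines refer to, and assuming HBase's internal ScheduledChore/ChoreService classes keep their long-standing shape, a minimal sketch; the names "ExampleChore" and "example" are placeholders.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public final class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Minimal Stoppable so the chore can be cancelled; real servers pass themselves.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped = false;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Period is in milliseconds by default, matching the
        // "period=60000, unit=MILLISECONDS" lines in the log above.
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60000) {
          @Override protected void chore() {
            System.out.println("chore fired");
          }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(chore);
        Thread.sleep(1000);
        stopper.stop("done");
        service.shutdown();
      }
    }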
2024-11-14T09:30:30,228 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d397384, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:30:30,228 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 83f56b55f2af,34157,-1 for getting cluster id 2024-11-14T09:30:30,229 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T09:30:30,231 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '07a9f990-4986-4dc7-a979-d48283a3a68d' 2024-11-14T09:30:30,232 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T09:30:30,232 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "07a9f990-4986-4dc7-a979-d48283a3a68d" 2024-11-14T09:30:30,232 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@111b08da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:30:30,232 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [83f56b55f2af,34157,-1] 2024-11-14T09:30:30,232 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T09:30:30,233 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:30:30,234 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50856, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T09:30:30,235 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46daa1a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:30:30,236 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:30:30,237 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=83f56b55f2af,44641,1731576628380, seqNum=-1] 2024-11-14T09:30:30,237 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:30:30,239 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48668, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:30:30,241 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=83f56b55f2af,34157,1731576628301 2024-11-14T09:30:30,242 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-14T09:30:30,246 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-11-14T09:30:30,247 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-14T09:30:30,247 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-14T09:30:30,247 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-14T09:30:30,247 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-14T09:30:30,247 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-14T09:30:30,247 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-14T09:30:30,248 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-14T09:30:30,248 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2029051909, stopped=false
2024-11-14T09:30:30,248 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=83f56b55f2af,34157,1731576628301
2024-11-14T09:30:30,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-14T09:30:30,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-14T09:30:30,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-14T09:30:30,250 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-14T09:30:30,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-14T09:30:30,250 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
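The call stack above simply records who asked for the shutdown: the JUnit tearDown of AbstractTestLogRolling calling HBaseTestingUtil.shutdownMiniCluster(). As a rough illustration only (not the actual test source; class and method names here are placeholders), the lifecycle that produces this start/stop logging looks like the following JUnit 4 sketch.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Spins up ZK, HDFS and HBase in-process; this is what produces the
        // "Starting up minicluster ..." and "Minicluster is up" lines in this log.
        testUtil.startMiniCluster();
      }

      @After
      public void tearDown() throws Exception {
        // Produces the "Shutting down minicluster" ... "Minicluster is down" sequence above.
        testUtil.shutdownMiniCluster();
      }

      @Test
      public void exampleTest() throws Exception {
        // The test body would exercise the cluster here (e.g. WAL rolling scenarios).
      }
    }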
2024-11-14T09:30:30,251 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-14T09:30:30,251 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-14T09:30:30,251 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '83f56b55f2af,44641,1731576628380' *****
2024-11-14T09:30:30,251 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-14T09:30:30,251 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-14T09:30:30,252 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-14T09:30:30,252 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-14T09:30:30,252 INFO [RS:0;83f56b55f2af:44641 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-14T09:30:30,252 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-14T09:30:30,252 INFO [RS:0;83f56b55f2af:44641 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-14T09:30:30,252 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.HRegionServer(959): stopping server 83f56b55f2af,44641,1731576628380
2024-11-14T09:30:30,252 INFO [RS:0;83f56b55f2af:44641 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-14T09:30:30,252 INFO [RS:0;83f56b55f2af:44641 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;83f56b55f2af:44641.
2024-11-14T09:30:30,252 DEBUG [RS:0;83f56b55f2af:44641 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-14T09:30:30,252 DEBUG [RS:0;83f56b55f2af:44641 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-14T09:30:30,252 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-14T09:30:30,253 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T09:30:30,253 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T09:30:30,253 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T09:30:30,253 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-14T09:30:30,253 DEBUG [RS:0;83f56b55f2af:44641 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-14T09:30:30,253 DEBUG [RS:0;83f56b55f2af:44641 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T09:30:30,253 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:30:30,253 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:30:30,253 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:30:30,253 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:30:30,253 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:30:30,254 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-14T09:30:30,273 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/data/hbase/meta/1588230740/.tmp/ns/67a69f1efff9464fa82abe3eb0454558 is 43, key is default/ns:d/1731576630153/Put/seqid=0 2024-11-14T09:30:30,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741835_1011 (size=5153) 2024-11-14T09:30:30,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741835_1011 (size=5153) 2024-11-14T09:30:30,280 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/data/hbase/meta/1588230740/.tmp/ns/67a69f1efff9464fa82abe3eb0454558 2024-11-14T09:30:30,288 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/data/hbase/meta/1588230740/.tmp/ns/67a69f1efff9464fa82abe3eb0454558 as hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/data/hbase/meta/1588230740/ns/67a69f1efff9464fa82abe3eb0454558 2024-11-14T09:30:30,294 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/data/hbase/meta/1588230740/ns/67a69f1efff9464fa82abe3eb0454558, entries=2, sequenceid=6, filesize=5.0 K 2024-11-14T09:30:30,296 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false 2024-11-14T09:30:30,296 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T09:30:30,301 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-14T09:30:30,302 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:30:30,302 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:30:30,302 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731576630253Running coprocessor pre-close hooks at 1731576630253Disabling compacts and flushes for region at 1731576630253Disabling writes for close at 1731576630253Obtaining lock to block concurrent updates at 1731576630254 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731576630254Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731576630254Flushing stores of hbase:meta,,1.1588230740 at 1731576630255 (+1 ms)Flushing 1588230740/ns: creating writer at 1731576630255Flushing 1588230740/ns: appending metadata at 1731576630272 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731576630272Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a594ce0: reopening flushed file at 1731576630287 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 42ms, sequenceid=6, compaction requested=false at 1731576630296 (+9 ms)Writing region close event to WAL at 1731576630297 (+1 ms)Running coprocessor post-close hooks at 1731576630302 (+5 ms)Closed at 1731576630302 2024-11-14T09:30:30,302 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T09:30:30,453 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.HRegionServer(976): stopping server 83f56b55f2af,44641,1731576628380; all regions closed. 
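The flush recorded above happens automatically while hbase:meta closes; the same kind of memstore flush can also be requested through the public Admin API. A minimal illustrative sketch, not part of this test run: the class name is a placeholder and the Configuration is assumed to point at the running cluster.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class FlushMetaSketch {
      static void flushMeta(Configuration conf) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Forces the hbase:meta memstore out as HFiles, the same kind of flush
          // performed during the region close shown above.
          admin.flush(TableName.META_TABLE_NAME);
        }
      }
    }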
2024-11-14T09:30:30,454 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:30,454 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:30,454 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:30,455 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:30,455 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:30,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741834_1010 (size=1152) 2024-11-14T09:30:30,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741834_1010 (size=1152) 2024-11-14T09:30:30,460 DEBUG [RS:0;83f56b55f2af:44641 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/oldWALs 2024-11-14T09:30:30,460 INFO [RS:0;83f56b55f2af:44641 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 83f56b55f2af%2C44641%2C1731576628380.meta:.meta(num 1731576630102) 2024-11-14T09:30:30,460 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:30,461 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:30,461 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:30,461 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:30,461 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:30,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741833_1009 (size=93) 2024-11-14T09:30:30,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741833_1009 (size=93) 2024-11-14T09:30:30,466 DEBUG [RS:0;83f56b55f2af:44641 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/oldWALs 2024-11-14T09:30:30,466 INFO [RS:0;83f56b55f2af:44641 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 83f56b55f2af%2C44641%2C1731576628380:(num 1731576629715) 2024-11-14T09:30:30,466 DEBUG [RS:0;83f56b55f2af:44641 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:30:30,466 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:30:30,466 INFO [RS:0;83f56b55f2af:44641 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:30:30,466 INFO [RS:0;83f56b55f2af:44641 {}] hbase.ChoreService(370): Chore service for: regionserver/83f56b55f2af:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T09:30:30,466 INFO [RS:0;83f56b55f2af:44641 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:30:30,466 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
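Closing a WAL and moving it to oldWALs, as in the entries above, is the same path a log roll takes; log-rolling tests like the one named in the call stacks typically force a roll through the Admin API. A brief illustrative sketch follows; obtaining the server names from cluster metrics is an assumption for the example, not something taken from this log.

    import java.io.IOException;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class RollWalSketch {
      // Ask every live region server to roll its current WAL; the old file is
      // eventually archived under oldWALs, as in the shutdown sequence above.
      static void rollAllWals(Admin admin) throws IOException {
        for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
          admin.rollWALWriter(sn);
        }
      }
    }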
2024-11-14T09:30:30,466 INFO [RS:0;83f56b55f2af:44641 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44641 2024-11-14T09:30:30,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/83f56b55f2af,44641,1731576628380 2024-11-14T09:30:30,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:30:30,469 INFO [RS:0;83f56b55f2af:44641 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:30:30,472 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [83f56b55f2af,44641,1731576628380] 2024-11-14T09:30:30,473 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/83f56b55f2af,44641,1731576628380 already deleted, retry=false 2024-11-14T09:30:30,473 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 83f56b55f2af,44641,1731576628380 expired; onlineServers=0 2024-11-14T09:30:30,473 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '83f56b55f2af,34157,1731576628301' ***** 2024-11-14T09:30:30,473 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T09:30:30,473 INFO [M:0;83f56b55f2af:34157 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:30:30,473 INFO [M:0;83f56b55f2af:34157 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:30:30,473 DEBUG [M:0;83f56b55f2af:34157 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T09:30:30,473 DEBUG [M:0;83f56b55f2af:34157 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T09:30:30,473 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T09:30:30,473 DEBUG [master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576629484 {}] cleaner.HFileCleaner(306): Exit Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576629484,5,FailOnTimeoutGroup] 2024-11-14T09:30:30,473 DEBUG [master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576629484 {}] cleaner.HFileCleaner(306): Exit Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576629484,5,FailOnTimeoutGroup] 2024-11-14T09:30:30,474 INFO [M:0;83f56b55f2af:34157 {}] hbase.ChoreService(370): Chore service for: master/83f56b55f2af:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T09:30:30,474 INFO [M:0;83f56b55f2af:34157 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:30:30,474 DEBUG [M:0;83f56b55f2af:34157 {}] master.HMaster(1795): Stopping service threads 2024-11-14T09:30:30,474 INFO [M:0;83f56b55f2af:34157 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T09:30:30,474 INFO [M:0;83f56b55f2af:34157 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:30:30,474 INFO [M:0;83f56b55f2af:34157 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T09:30:30,474 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T09:30:30,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T09:30:30,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:30,475 DEBUG [M:0;83f56b55f2af:34157 {}] zookeeper.ZKUtil(347): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T09:30:30,475 WARN [M:0;83f56b55f2af:34157 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T09:30:30,476 INFO [M:0;83f56b55f2af:34157 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/.lastflushedseqids 2024-11-14T09:30:30,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741836_1012 (size=99) 2024-11-14T09:30:30,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741836_1012 (size=99) 2024-11-14T09:30:30,483 INFO [M:0;83f56b55f2af:34157 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T09:30:30,483 INFO [M:0;83f56b55f2af:34157 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T09:30:30,483 DEBUG [M:0;83f56b55f2af:34157 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:30:30,483 INFO [M:0;83f56b55f2af:34157 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:30:30,483 DEBUG [M:0;83f56b55f2af:34157 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:30:30,483 DEBUG [M:0;83f56b55f2af:34157 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:30:30,483 DEBUG [M:0;83f56b55f2af:34157 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:30:30,483 INFO [M:0;83f56b55f2af:34157 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-14T09:30:30,501 DEBUG [M:0;83f56b55f2af:34157 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/936d4492f4f149fea6861c08570dfe30 is 82, key is hbase:meta,,1/info:regioninfo/1731576630132/Put/seqid=0 2024-11-14T09:30:30,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741837_1013 (size=5672) 2024-11-14T09:30:30,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741837_1013 (size=5672) 2024-11-14T09:30:30,508 INFO [M:0;83f56b55f2af:34157 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/936d4492f4f149fea6861c08570dfe30 2024-11-14T09:30:30,531 DEBUG [M:0;83f56b55f2af:34157 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/74bba6db5daa4bccbdcb5a58d7395c86 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731576630159/Put/seqid=0 2024-11-14T09:30:30,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741838_1014 (size=5275) 2024-11-14T09:30:30,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741838_1014 (size=5275) 2024-11-14T09:30:30,537 INFO [M:0;83f56b55f2af:34157 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/74bba6db5daa4bccbdcb5a58d7395c86 2024-11-14T09:30:30,560 DEBUG [M:0;83f56b55f2af:34157 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/284eb6cd2cb345718a1fcaca81c74524 is 69, key is 83f56b55f2af,44641,1731576628380/rs:state/1731576629558/Put/seqid=0 2024-11-14T09:30:30,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741839_1015 (size=5156) 2024-11-14T09:30:30,566 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741839_1015 (size=5156) 2024-11-14T09:30:30,567 INFO [M:0;83f56b55f2af:34157 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/284eb6cd2cb345718a1fcaca81c74524 2024-11-14T09:30:30,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:30:30,572 INFO [RS:0;83f56b55f2af:44641 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:30:30,572 INFO [RS:0;83f56b55f2af:44641 {}] regionserver.HRegionServer(1031): Exiting; stopping=83f56b55f2af,44641,1731576628380; zookeeper connection closed. 2024-11-14T09:30:30,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44641-0x10115d06ec80001, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:30:30,572 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@35d1d759 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@35d1d759 2024-11-14T09:30:30,572 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T09:30:30,589 DEBUG [M:0;83f56b55f2af:34157 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2b40df3c54c149ac882912470a3643e0 is 52, key is load_balancer_on/state:d/1731576630245/Put/seqid=0 2024-11-14T09:30:30,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741840_1016 (size=5056) 2024-11-14T09:30:30,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741840_1016 (size=5056) 2024-11-14T09:30:30,596 INFO [M:0;83f56b55f2af:34157 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2b40df3c54c149ac882912470a3643e0 2024-11-14T09:30:30,604 DEBUG [M:0;83f56b55f2af:34157 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/936d4492f4f149fea6861c08570dfe30 as hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/936d4492f4f149fea6861c08570dfe30 2024-11-14T09:30:30,610 INFO [M:0;83f56b55f2af:34157 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/936d4492f4f149fea6861c08570dfe30, entries=8, sequenceid=29, filesize=5.5 K 2024-11-14T09:30:30,611 DEBUG [M:0;83f56b55f2af:34157 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/74bba6db5daa4bccbdcb5a58d7395c86 as hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/74bba6db5daa4bccbdcb5a58d7395c86 2024-11-14T09:30:30,618 INFO [M:0;83f56b55f2af:34157 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/74bba6db5daa4bccbdcb5a58d7395c86, entries=3, sequenceid=29, filesize=5.2 K 2024-11-14T09:30:30,619 DEBUG [M:0;83f56b55f2af:34157 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/284eb6cd2cb345718a1fcaca81c74524 as hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/284eb6cd2cb345718a1fcaca81c74524 2024-11-14T09:30:30,625 INFO [M:0;83f56b55f2af:34157 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/284eb6cd2cb345718a1fcaca81c74524, entries=1, sequenceid=29, filesize=5.0 K 2024-11-14T09:30:30,626 DEBUG [M:0;83f56b55f2af:34157 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2b40df3c54c149ac882912470a3643e0 as hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2b40df3c54c149ac882912470a3643e0 2024-11-14T09:30:30,634 INFO [M:0;83f56b55f2af:34157 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43643/user/jenkins/test-data/927f76c5-bf82-0c60-2d30-0adc8f93c824/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2b40df3c54c149ac882912470a3643e0, entries=1, sequenceid=29, filesize=4.9 K 2024-11-14T09:30:30,635 INFO [M:0;83f56b55f2af:34157 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=29, compaction requested=false 2024-11-14T09:30:30,637 INFO [M:0;83f56b55f2af:34157 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:30:30,637 DEBUG [M:0;83f56b55f2af:34157 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731576630483Disabling compacts and flushes for region at 1731576630483Disabling writes for close at 1731576630483Obtaining lock to block concurrent updates at 1731576630483Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731576630483Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731576630484 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731576630484Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731576630484Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731576630501 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731576630501Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731576630514 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731576630530 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731576630530Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731576630543 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731576630560 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731576630560Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731576630573 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731576630588 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731576630588Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3852b9e7: reopening flushed file at 1731576630603 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5eb6f32b: reopening flushed file at 1731576630611 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30e77c8d: reopening flushed file at 1731576630618 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8403f72: reopening flushed file at 1731576630625 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 152ms, sequenceid=29, compaction requested=false at 1731576630635 (+10 ms)Writing region close event to WAL at 1731576630637 (+2 ms)Closed at 1731576630637 2024-11-14T09:30:30,638 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:30,638 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:30,638 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:30,638 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:30,639 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:30,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37071 is added to blk_1073741830_1006 (size=10311) 2024-11-14T09:30:30,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741830_1006 (size=10311) 2024-11-14T09:30:30,642 INFO [M:0;83f56b55f2af:34157 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T09:30:30,642 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T09:30:30,642 INFO [M:0;83f56b55f2af:34157 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34157 2024-11-14T09:30:30,642 INFO [M:0;83f56b55f2af:34157 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:30:30,745 INFO [M:0;83f56b55f2af:34157 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:30:30,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:30:30,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34157-0x10115d06ec80000, quorum=127.0.0.1:62495, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:30:30,748 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3739906{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:30:30,749 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4b4297b9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:30:30,749 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:30:30,749 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74fcc9e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:30:30,749 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@38d929c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/hadoop.log.dir/,STOPPED} 2024-11-14T09:30:30,751 WARN [BP-1828366861-172.17.0.2-1731576627453 heartbeating to localhost/127.0.0.1:43643 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:30:30,751 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:30:30,751 WARN [BP-1828366861-172.17.0.2-1731576627453 heartbeating to localhost/127.0.0.1:43643 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1828366861-172.17.0.2-1731576627453 (Datanode Uuid fbf0f084-44e0-4e27-8b7f-2b0ba08ddafe) service to localhost/127.0.0.1:43643 2024-11-14T09:30:30,751 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:30:30,752 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/cluster_a9841446-98d7-ba18-9cc3-25d53009acc9/data/data3/current/BP-1828366861-172.17.0.2-1731576627453 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:30:30,752 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/cluster_a9841446-98d7-ba18-9cc3-25d53009acc9/data/data4/current/BP-1828366861-172.17.0.2-1731576627453 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:30:30,752 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:30:30,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b1bb178{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:30:30,755 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1806f1a6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:30:30,755 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:30:30,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@232ba44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:30:30,755 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35b4906c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/hadoop.log.dir/,STOPPED} 2024-11-14T09:30:30,757 WARN [BP-1828366861-172.17.0.2-1731576627453 heartbeating to localhost/127.0.0.1:43643 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:30:30,757 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:30:30,757 WARN [BP-1828366861-172.17.0.2-1731576627453 heartbeating to localhost/127.0.0.1:43643 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1828366861-172.17.0.2-1731576627453 (Datanode Uuid 129d72b2-8933-40ae-9c0a-264da67edadc) service to localhost/127.0.0.1:43643 2024-11-14T09:30:30,757 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:30:30,757 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/cluster_a9841446-98d7-ba18-9cc3-25d53009acc9/data/data1/current/BP-1828366861-172.17.0.2-1731576627453 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:30:30,758 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/cluster_a9841446-98d7-ba18-9cc3-25d53009acc9/data/data2/current/BP-1828366861-172.17.0.2-1731576627453 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:30:30,758 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:30:30,764 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@74d3eb32{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:30:30,764 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@245042fb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:30:30,764 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:30:30,765 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19bf40f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:30:30,765 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1490ab38{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/hadoop.log.dir/,STOPPED} 2024-11-14T09:30:30,771 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T09:30:30,787 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T09:30:30,787 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T09:30:30,787 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/hadoop.log.dir so I do NOT create it in target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0 2024-11-14T09:30:30,787 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff1b6325-f6cb-f083-b357-4454c50ef94b/hadoop.tmp.dir so I do NOT create it in target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0 2024-11-14T09:30:30,787 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4, deleteOnExit=true 2024-11-14T09:30:30,787 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T09:30:30,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/test.cache.data in system properties and HBase conf 2024-11-14T09:30:30,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T09:30:30,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.log.dir in system properties and HBase conf 2024-11-14T09:30:30,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T09:30:30,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T09:30:30,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T09:30:30,788 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T09:30:30,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:30:30,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:30:30,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T09:30:30,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:30:30,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T09:30:30,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T09:30:30,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:30:30,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:30:30,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T09:30:30,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/nfs.dump.dir in system properties and HBase conf 2024-11-14T09:30:30,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/java.io.tmpdir in system properties and HBase conf 2024-11-14T09:30:30,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:30:30,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T09:30:30,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T09:30:30,804 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:30:30,881 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:30:30,887 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:30:30,890 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:30:30,890 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:30:30,890 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:30:30,891 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:30:30,892 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@320b7eeb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:30:30,892 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3789f604{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:30:31,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3287cde6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/java.io.tmpdir/jetty-localhost-40735-hadoop-hdfs-3_4_1-tests_jar-_-any-8637256018948541407/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:30:31,009 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@8b1f0fc{HTTP/1.1, (http/1.1)}{localhost:40735} 2024-11-14T09:30:31,010 INFO [Time-limited test {}] server.Server(415): Started @105196ms 2024-11-14T09:30:31,024 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:30:31,095 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:30:31,099 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:30:31,099 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:30:31,099 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:30:31,099 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:30:31,100 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72770c41{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:30:31,100 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@778fdefb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:30:31,215 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2c46fc61{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/java.io.tmpdir/jetty-localhost-46461-hadoop-hdfs-3_4_1-tests_jar-_-any-3366145806179603892/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:30:31,215 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1173af39{HTTP/1.1, (http/1.1)}{localhost:46461} 2024-11-14T09:30:31,215 INFO [Time-limited test {}] server.Server(415): Started @105402ms 2024-11-14T09:30:31,217 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:30:31,254 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:30:31,258 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:30:31,258 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:30:31,258 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:30:31,259 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:30:31,261 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c3714ef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:30:31,261 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30985369{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:30:31,329 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data2/current/BP-2105580279-172.17.0.2-1731576630823/current, will proceed with Du for space computation calculation, 2024-11-14T09:30:31,329 WARN [Thread-655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data1/current/BP-2105580279-172.17.0.2-1731576630823/current, will proceed with Du for space computation calculation, 2024-11-14T09:30:31,348 WARN [Thread-634 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:30:31,351 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1fe1b4658aae509 with lease ID 0xe55f664660dab15: Processing first storage report for DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f from datanode DatanodeRegistration(127.0.0.1:37737, datanodeUuid=155aa341-5430-4b94-8a29-880e06e197a9, infoPort=45963, infoSecurePort=0, ipcPort=44553, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823) 2024-11-14T09:30:31,351 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1fe1b4658aae509 with lease ID 0xe55f664660dab15: from storage DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f node DatanodeRegistration(127.0.0.1:37737, datanodeUuid=155aa341-5430-4b94-8a29-880e06e197a9, infoPort=45963, infoSecurePort=0, ipcPort=44553, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:30:31,351 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1fe1b4658aae509 with lease ID 0xe55f664660dab15: Processing first storage report for DS-527f8c1e-6869-45c7-aa08-5753b7df838d from datanode DatanodeRegistration(127.0.0.1:37737, datanodeUuid=155aa341-5430-4b94-8a29-880e06e197a9, infoPort=45963, infoSecurePort=0, ipcPort=44553, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823) 2024-11-14T09:30:31,351 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1fe1b4658aae509 with lease ID 0xe55f664660dab15: from storage DS-527f8c1e-6869-45c7-aa08-5753b7df838d node DatanodeRegistration(127.0.0.1:37737, datanodeUuid=155aa341-5430-4b94-8a29-880e06e197a9, infoPort=45963, infoSecurePort=0, ipcPort=44553, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:30:31,380 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6a102770{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/java.io.tmpdir/jetty-localhost-37043-hadoop-hdfs-3_4_1-tests_jar-_-any-13665368846113647738/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:30:31,380 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@385db9bf{HTTP/1.1, (http/1.1)}{localhost:37043} 2024-11-14T09:30:31,380 INFO [Time-limited test {}] server.Server(415): Started @105567ms 2024-11-14T09:30:31,382 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T09:30:31,534 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data4/current/BP-2105580279-172.17.0.2-1731576630823/current, will proceed with Du for space computation calculation, 2024-11-14T09:30:31,534 WARN [Thread-681 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data3/current/BP-2105580279-172.17.0.2-1731576630823/current, will proceed with Du for space computation calculation, 2024-11-14T09:30:31,555 WARN [Thread-670 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:30:31,557 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x83080d512e86d698 with lease ID 0xe55f664660dab16: Processing first storage report for DS-820f820f-a0a2-4a8b-914e-94b63dc152d2 from datanode DatanodeRegistration(127.0.0.1:46553, datanodeUuid=340cd0fe-f1d0-4fc4-8a97-207897c75a92, infoPort=34151, infoSecurePort=0, ipcPort=43633, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823) 2024-11-14T09:30:31,557 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83080d512e86d698 with lease ID 0xe55f664660dab16: from storage DS-820f820f-a0a2-4a8b-914e-94b63dc152d2 node DatanodeRegistration(127.0.0.1:46553, datanodeUuid=340cd0fe-f1d0-4fc4-8a97-207897c75a92, infoPort=34151, infoSecurePort=0, ipcPort=43633, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:30:31,558 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x83080d512e86d698 with lease ID 0xe55f664660dab16: Processing first storage report for DS-63205298-e9c4-4b00-967d-5a4dd529df6d from datanode DatanodeRegistration(127.0.0.1:46553, datanodeUuid=340cd0fe-f1d0-4fc4-8a97-207897c75a92, infoPort=34151, infoSecurePort=0, ipcPort=43633, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823) 2024-11-14T09:30:31,558 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83080d512e86d698 with lease ID 0xe55f664660dab16: from storage DS-63205298-e9c4-4b00-967d-5a4dd529df6d node DatanodeRegistration(127.0.0.1:46553, datanodeUuid=340cd0fe-f1d0-4fc4-8a97-207897c75a92, infoPort=34151, infoSecurePort=0, ipcPort=43633, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:30:31,576 INFO [regionserver/83f56b55f2af:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:30:31,614 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0 2024-11-14T09:30:31,617 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/zookeeper_0, clientPort=56059, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T09:30:31,618 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56059 2024-11-14T09:30:31,618 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:31,620 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:31,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46553 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:30:31,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37737 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:30:31,631 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60 with version=8 2024-11-14T09:30:31,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/hbase-staging 2024-11-14T09:30:31,633 INFO [Time-limited test {}] client.ConnectionUtils(128): master/83f56b55f2af:0 server-side Connection retries=45 2024-11-14T09:30:31,633 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:30:31,633 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:30:31,633 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:30:31,633 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:30:31,633 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:30:31,633 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, 
hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T09:30:31,633 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:30:31,634 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40525 2024-11-14T09:30:31,635 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40525 connecting to ZooKeeper ensemble=127.0.0.1:56059 2024-11-14T09:30:31,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:405250x0, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:30:31,645 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40525-0x10115d07bd00000 connected 2024-11-14T09:30:31,663 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:31,665 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:31,668 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:30:31,668 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60, hbase.cluster.distributed=false 2024-11-14T09:30:31,670 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:30:31,671 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40525 2024-11-14T09:30:31,671 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40525 2024-11-14T09:30:31,671 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40525 2024-11-14T09:30:31,671 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40525 2024-11-14T09:30:31,672 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40525 2024-11-14T09:30:31,688 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/83f56b55f2af:0 server-side Connection retries=45 2024-11-14T09:30:31,688 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:30:31,688 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:30:31,688 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 
readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:30:31,688 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:30:31,688 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:30:31,688 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T09:30:31,689 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:30:31,689 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44811 2024-11-14T09:30:31,691 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44811 connecting to ZooKeeper ensemble=127.0.0.1:56059 2024-11-14T09:30:31,691 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:31,693 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:31,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:448110x0, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:30:31,699 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:448110x0, quorum=127.0.0.1:56059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:30:31,699 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44811-0x10115d07bd00001 connected 2024-11-14T09:30:31,699 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T09:30:31,703 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T09:30:31,704 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T09:30:31,705 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:30:31,706 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44811 2024-11-14T09:30:31,708 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44811 2024-11-14T09:30:31,708 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44811 2024-11-14T09:30:31,709 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, 
numCallQueues=1, port=44811 2024-11-14T09:30:31,710 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44811 2024-11-14T09:30:31,722 DEBUG [M:0;83f56b55f2af:40525 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;83f56b55f2af:40525 2024-11-14T09:30:31,724 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/83f56b55f2af,40525,1731576631633 2024-11-14T09:30:31,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:30:31,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:30:31,727 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/83f56b55f2af,40525,1731576631633 2024-11-14T09:30:31,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T09:30:31,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:31,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:31,730 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T09:30:31,730 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/83f56b55f2af,40525,1731576631633 from backup master directory 2024-11-14T09:30:31,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/83f56b55f2af,40525,1731576631633 2024-11-14T09:30:31,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:30:31,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:30:31,732 WARN [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-14T09:30:31,732 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=83f56b55f2af,40525,1731576631633 2024-11-14T09:30:31,736 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/hbase.id] with ID: 363fd606-b134-4ef3-ad53-2f753a9ae3bd 2024-11-14T09:30:31,736 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/.tmp/hbase.id 2024-11-14T09:30:31,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37737 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:30:31,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46553 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:30:31,744 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/.tmp/hbase.id]:[hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/hbase.id] 2024-11-14T09:30:31,758 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:31,758 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T09:30:31,760 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-14T09:30:31,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:31,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:31,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37737 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:30:31,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46553 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:30:31,772 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:30:31,773 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T09:30:31,773 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:30:31,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37737 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:30:31,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46553 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:30:31,783 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store 2024-11-14T09:30:31,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37737 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:30:31,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46553 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:30:31,792 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:30:31,793 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:30:31,793 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:30:31,793 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:30:31,793 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:30:31,793 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:30:31,793 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T09:30:31,793 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731576631793Disabling compacts and flushes for region at 1731576631793Disabling writes for close at 1731576631793Writing region close event to WAL at 1731576631793Closed at 1731576631793 2024-11-14T09:30:31,793 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:30:31,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T09:30:31,794 WARN [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/.initializing 2024-11-14T09:30:31,794 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/WALs/83f56b55f2af,40525,1731576631633 2024-11-14T09:30:31,795 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T09:30:31,795 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-14T09:30:31,798 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C40525%2C1731576631633, suffix=, logDir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/WALs/83f56b55f2af,40525,1731576631633, archiveDir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/oldWALs, maxLogs=10 2024-11-14T09:30:31,798 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C40525%2C1731576631633.1731576631798 2024-11-14T09:30:31,804 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/WALs/83f56b55f2af,40525,1731576631633/83f56b55f2af%2C40525%2C1731576631633.1731576631798 2024-11-14T09:30:31,808 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34151:34151),(127.0.0.1/127.0.0.1:45963:45963)] 2024-11-14T09:30:31,809 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:30:31,809 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:30:31,809 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 
1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:31,809 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:31,811 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:31,813 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T09:30:31,813 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:31,814 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:31,814 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:31,815 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T09:30:31,815 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:31,816 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:30:31,816 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:31,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T09:30:31,818 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:31,818 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:30:31,818 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:31,819 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T09:30:31,820 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:31,820 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:30:31,820 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] 
regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:31,821 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:31,822 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:31,823 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:31,823 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:31,824 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T09:30:31,826 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:30:31,828 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:30:31,829 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=836008, jitterRate=0.06304013729095459}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T09:30:31,830 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731576631809Initializing all the Stores at 1731576631811 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576631811Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576631811Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576631811Instantiating store for column family {NAME => 'state', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576631811Cleaning up temporary data from old regions at 1731576631823 (+12 ms)Region opened successfully at 1731576631830 (+7 ms) 2024-11-14T09:30:31,831 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T09:30:31,835 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64785227, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83f56b55f2af/172.17.0.2:0 2024-11-14T09:30:31,836 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T09:30:31,836 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T09:30:31,836 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T09:30:31,836 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T09:30:31,837 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T09:30:31,838 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T09:30:31,838 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T09:30:31,840 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-14T09:30:31,841 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T09:30:31,843 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T09:30:31,843 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T09:30:31,844 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T09:30:31,846 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T09:30:31,847 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T09:30:31,848 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T09:30:31,849 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T09:30:31,850 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T09:30:31,852 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T09:30:31,854 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T09:30:31,855 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T09:30:31,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:30:31,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:30:31,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:31,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-14T09:30:31,859 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=83f56b55f2af,40525,1731576631633, sessionid=0x10115d07bd00000, setting cluster-up flag (Was=false) 2024-11-14T09:30:31,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:31,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:31,868 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T09:30:31,869 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=83f56b55f2af,40525,1731576631633 2024-11-14T09:30:31,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:31,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:31,874 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:30:31,879 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T09:30:31,880 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=83f56b55f2af,40525,1731576631633 2024-11-14T09:30:31,881 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:30:31,881 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T09:30:31,883 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T09:30:31,883 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T09:30:31,884 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T09:30:31,884 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 83f56b55f2af,40525,1731576631633 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T09:30:31,885 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:30:31,885 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:30:31,885 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:30:31,885 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:30:31,885 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/83f56b55f2af:0, corePoolSize=10, maxPoolSize=10 2024-11-14T09:30:31,885 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:31,885 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/83f56b55f2af:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:30:31,885 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 
2024-11-14T09:30:31,889 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731576661889 2024-11-14T09:30:31,889 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T09:30:31,889 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T09:30:31,889 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T09:30:31,889 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T09:30:31,889 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T09:30:31,889 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T09:30:31,889 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:31,890 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T09:30:31,890 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T09:30:31,890 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T09:30:31,890 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:30:31,890 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T09:30:31,890 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T09:30:31,890 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T09:30:31,890 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576631890,5,FailOnTimeoutGroup] 2024-11-14T09:30:31,891 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576631890,5,FailOnTimeoutGroup] 2024-11-14T09:30:31,891 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:31,891 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
2024-11-14T09:30:31,891 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:31,891 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:31,892 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:31,892 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T09:30:31,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46553 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:30:31,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37737 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:30:31,900 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T09:30:31,900 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60 2024-11-14T09:30:31,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46553 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:30:31,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37737 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:30:31,907 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:30:31,909 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:30:31,910 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:30:31,910 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:31,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:31,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:30:31,912 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:30:31,912 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:31,912 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer(746): ClusterId : 363fd606-b134-4ef3-ad53-2f753a9ae3bd 2024-11-14T09:30:31,913 DEBUG [RS:0;83f56b55f2af:44811 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T09:30:31,913 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:31,913 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:30:31,941 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:30:31,941 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:31,942 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:31,942 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:30:31,942 DEBUG [RS:0;83f56b55f2af:44811 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T09:30:31,942 DEBUG 
[RS:0;83f56b55f2af:44811 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T09:30:31,944 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:30:31,944 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:31,945 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:31,945 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:30:31,946 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740 2024-11-14T09:30:31,946 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740 2024-11-14T09:30:31,946 DEBUG [RS:0;83f56b55f2af:44811 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T09:30:31,946 DEBUG [RS:0;83f56b55f2af:44811 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c472d17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83f56b55f2af/172.17.0.2:0 2024-11-14T09:30:31,948 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:30:31,948 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:30:31,948 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-14T09:30:31,950 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:30:31,953 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:30:31,953 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=820717, jitterRate=0.04359638690948486}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:30:31,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731576631907Initializing all the Stores at 1731576631908 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576631908Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576631908Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576631908Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576631908Cleaning up temporary data from old regions at 1731576631948 (+40 ms)Region opened successfully at 1731576631954 (+6 ms) 2024-11-14T09:30:31,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:30:31,955 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:30:31,955 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:30:31,955 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:30:31,955 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:30:31,956 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:30:31,956 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731576631954Disabling compacts and flushes for region at 1731576631954Disabling writes for close at 1731576631955 (+1 ms)Writing 
region close event to WAL at 1731576631956 (+1 ms)Closed at 1731576631956 2024-11-14T09:30:31,958 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:30:31,958 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T09:30:31,958 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T09:30:31,960 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:30:31,960 DEBUG [RS:0;83f56b55f2af:44811 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;83f56b55f2af:44811 2024-11-14T09:30:31,960 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T09:30:31,960 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T09:30:31,960 DEBUG [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T09:30:31,961 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer(2659): reportForDuty to master=83f56b55f2af,40525,1731576631633 with port=44811, startcode=1731576631688 2024-11-14T09:30:31,961 DEBUG [RS:0;83f56b55f2af:44811 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T09:30:31,961 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T09:30:31,963 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55221, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T09:30:31,964 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40525 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 83f56b55f2af,44811,1731576631688 2024-11-14T09:30:31,964 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40525 {}] master.ServerManager(517): Registering regionserver=83f56b55f2af,44811,1731576631688 2024-11-14T09:30:31,965 DEBUG [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60 2024-11-14T09:30:31,965 DEBUG [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36133 2024-11-14T09:30:31,965 DEBUG [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T09:30:31,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:30:31,968 DEBUG 
[RS:0;83f56b55f2af:44811 {}] zookeeper.ZKUtil(111): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/83f56b55f2af,44811,1731576631688 2024-11-14T09:30:31,968 WARN [RS:0;83f56b55f2af:44811 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:30:31,968 INFO [RS:0;83f56b55f2af:44811 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:30:31,968 DEBUG [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688 2024-11-14T09:30:31,968 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [83f56b55f2af,44811,1731576631688] 2024-11-14T09:30:31,972 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T09:30:31,973 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T09:30:31,973 INFO [RS:0;83f56b55f2af:44811 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:30:31,974 INFO [RS:0;83f56b55f2af:44811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:31,974 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T09:30:31,975 INFO [RS:0;83f56b55f2af:44811 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T09:30:31,975 INFO [RS:0;83f56b55f2af:44811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-14T09:30:31,975 DEBUG [RS:0;83f56b55f2af:44811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:31,975 DEBUG [RS:0;83f56b55f2af:44811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:31,975 DEBUG [RS:0;83f56b55f2af:44811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:31,975 DEBUG [RS:0;83f56b55f2af:44811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:31,975 DEBUG [RS:0;83f56b55f2af:44811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:31,975 DEBUG [RS:0;83f56b55f2af:44811 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/83f56b55f2af:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:30:31,975 DEBUG [RS:0;83f56b55f2af:44811 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:31,975 DEBUG [RS:0;83f56b55f2af:44811 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:31,975 DEBUG [RS:0;83f56b55f2af:44811 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:31,975 DEBUG [RS:0;83f56b55f2af:44811 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:31,975 DEBUG [RS:0;83f56b55f2af:44811 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:31,975 DEBUG [RS:0;83f56b55f2af:44811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:31,975 DEBUG [RS:0;83f56b55f2af:44811 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/83f56b55f2af:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:30:31,975 DEBUG [RS:0;83f56b55f2af:44811 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:30:31,976 INFO [RS:0;83f56b55f2af:44811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:31,976 INFO [RS:0;83f56b55f2af:44811 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:31,976 INFO [RS:0;83f56b55f2af:44811 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:31,976 INFO [RS:0;83f56b55f2af:44811 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-14T09:30:31,976 INFO [RS:0;83f56b55f2af:44811 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:31,976 INFO [RS:0;83f56b55f2af:44811 {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,44811,1731576631688-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:30:31,991 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T09:30:31,991 INFO [RS:0;83f56b55f2af:44811 {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,44811,1731576631688-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:31,991 INFO [RS:0;83f56b55f2af:44811 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:31,991 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.Replication(171): 83f56b55f2af,44811,1731576631688 started 2024-11-14T09:30:32,005 INFO [RS:0;83f56b55f2af:44811 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:32,005 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer(1482): Serving as 83f56b55f2af,44811,1731576631688, RpcServer on 83f56b55f2af/172.17.0.2:44811, sessionid=0x10115d07bd00001 2024-11-14T09:30:32,005 DEBUG [RS:0;83f56b55f2af:44811 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T09:30:32,005 DEBUG [RS:0;83f56b55f2af:44811 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 83f56b55f2af,44811,1731576631688 2024-11-14T09:30:32,005 DEBUG [RS:0;83f56b55f2af:44811 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83f56b55f2af,44811,1731576631688' 2024-11-14T09:30:32,005 DEBUG [RS:0;83f56b55f2af:44811 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T09:30:32,005 DEBUG [RS:0;83f56b55f2af:44811 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T09:30:32,006 DEBUG [RS:0;83f56b55f2af:44811 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T09:30:32,006 DEBUG [RS:0;83f56b55f2af:44811 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T09:30:32,006 DEBUG [RS:0;83f56b55f2af:44811 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 83f56b55f2af,44811,1731576631688 2024-11-14T09:30:32,006 DEBUG [RS:0;83f56b55f2af:44811 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83f56b55f2af,44811,1731576631688' 2024-11-14T09:30:32,006 DEBUG [RS:0;83f56b55f2af:44811 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T09:30:32,006 DEBUG [RS:0;83f56b55f2af:44811 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T09:30:32,007 DEBUG [RS:0;83f56b55f2af:44811 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T09:30:32,007 INFO [RS:0;83f56b55f2af:44811 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T09:30:32,007 INFO [RS:0;83f56b55f2af:44811 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-14T09:30:32,109 INFO [RS:0;83f56b55f2af:44811 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C44811%2C1731576631688, suffix=, logDir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688, archiveDir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/oldWALs, maxLogs=32 2024-11-14T09:30:32,110 INFO [RS:0;83f56b55f2af:44811 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44811%2C1731576631688.1731576632110 2024-11-14T09:30:32,111 WARN [83f56b55f2af:40525 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T09:30:32,117 INFO [RS:0;83f56b55f2af:44811 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576632110 2024-11-14T09:30:32,121 DEBUG [RS:0;83f56b55f2af:44811 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45963:45963),(127.0.0.1/127.0.0.1:34151:34151)] 2024-11-14T09:30:32,361 DEBUG [83f56b55f2af:40525 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T09:30:32,362 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=83f56b55f2af,44811,1731576631688 2024-11-14T09:30:32,363 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 83f56b55f2af,44811,1731576631688, state=OPENING 2024-11-14T09:30:32,365 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T09:30:32,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:32,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:30:32,367 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:30:32,367 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=83f56b55f2af,44811,1731576631688}] 2024-11-14T09:30:32,367 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:30:32,367 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:30:32,398 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:30:32,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:30:32,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:30:32,415 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:30:32,416 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:30:32,521 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T09:30:32,523 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59725, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T09:30:32,527 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T09:30:32,527 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:30:32,529 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C44811%2C1731576631688.meta, suffix=.meta, logDir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688, archiveDir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/oldWALs, maxLogs=32 2024-11-14T09:30:32,530 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta 2024-11-14T09:30:32,535 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta 2024-11-14T09:30:32,535 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34151:34151),(127.0.0.1/127.0.0.1:45963:45963)] 2024-11-14T09:30:32,536 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:30:32,536 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T09:30:32,537 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T09:30:32,537 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 
{event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-14T09:30:32,537 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T09:30:32,537 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:30:32,537 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T09:30:32,537 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T09:30:32,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:30:32,539 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:30:32,539 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:32,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:32,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:30:32,540 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:30:32,540 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:32,541 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:32,541 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:30:32,542 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:30:32,542 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:32,542 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:32,542 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:30:32,543 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:30:32,543 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:32,544 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:30:32,544 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:30:32,545 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740 2024-11-14T09:30:32,546 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740 2024-11-14T09:30:32,547 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:30:32,547 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:30:32,547 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T09:30:32,549 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:30:32,550 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=842577, jitterRate=0.07139220833778381}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:30:32,550 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T09:30:32,550 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731576632537Writing region info on filesystem at 1731576632537Initializing all the Stores at 1731576632538 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576632538Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576632538Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576632538Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576632538Cleaning up temporary data from old regions at 1731576632547 (+9 ms)Running coprocessor post-open hooks at 1731576632550 (+3 ms)Region opened successfully at 1731576632550 2024-11-14T09:30:32,551 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731576632521 2024-11-14T09:30:32,554 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T09:30:32,555 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T09:30:32,555 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=83f56b55f2af,44811,1731576631688 2024-11-14T09:30:32,557 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 83f56b55f2af,44811,1731576631688, state=OPEN 2024-11-14T09:30:32,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:30:32,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:30:32,571 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=83f56b55f2af,44811,1731576631688 2024-11-14T09:30:32,571 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:30:32,571 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:30:32,574 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T09:30:32,574 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=83f56b55f2af,44811,1731576631688 in 204 msec 2024-11-14T09:30:32,577 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T09:30:32,577 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 616 msec 2024-11-14T09:30:32,578 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:30:32,578 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T09:30:32,579 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:30:32,579 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=83f56b55f2af,44811,1731576631688, seqNum=-1] 2024-11-14T09:30:32,580 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:30:32,581 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47689, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:30:32,586 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 702 msec 2024-11-14T09:30:32,586 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731576632586, completionTime=-1 2024-11-14T09:30:32,586 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T09:30:32,587 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T09:30:32,588 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T09:30:32,588 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731576692588 2024-11-14T09:30:32,588 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731576752588 2024-11-14T09:30:32,588 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-14T09:30:32,589 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,40525,1731576631633-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:32,589 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,40525,1731576631633-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:32,589 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,40525,1731576631633-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T09:30:32,589 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-83f56b55f2af:40525, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:32,589 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:32,589 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:32,591 DEBUG [master/83f56b55f2af:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T09:30:32,592 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.860sec 2024-11-14T09:30:32,593 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T09:30:32,593 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T09:30:32,593 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T09:30:32,593 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T09:30:32,593 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T09:30:32,593 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,40525,1731576631633-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:30:32,593 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,40525,1731576631633-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T09:30:32,595 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T09:30:32,595 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T09:30:32,595 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,40525,1731576631633-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
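At this point the master has finished initialization (the log reports "Master has completed initialization 0.860sec") and the test harness is about to report the mini cluster as up. A minimal sketch of how a test typically brings up such an in-process cluster with HBaseTestingUtil is shown below; the method names startMiniCluster/shutdownMiniCluster and the surrounding structure are assumptions about the harness, not taken from this log.

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // Start an in-process HDFS + HBase master + region server, roughly what
        // the surrounding log records during startup. (Assumed harness usage.)
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster();
        try {
          // ... exercise the cluster through util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }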
2024-11-14T09:30:32,612 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17a8f59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:30:32,612 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 83f56b55f2af,40525,-1 for getting cluster id 2024-11-14T09:30:32,613 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T09:30:32,614 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '363fd606-b134-4ef3-ad53-2f753a9ae3bd' 2024-11-14T09:30:32,615 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T09:30:32,615 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "363fd606-b134-4ef3-ad53-2f753a9ae3bd" 2024-11-14T09:30:32,615 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b49e5fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:30:32,615 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [83f56b55f2af,40525,-1] 2024-11-14T09:30:32,615 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T09:30:32,615 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:30:32,617 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45248, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T09:30:32,618 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@167e001a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:30:32,618 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:30:32,619 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=83f56b55f2af,44811,1731576631688, seqNum=-1] 2024-11-14T09:30:32,619 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:30:32,621 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33156, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:30:32,623 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=83f56b55f2af,40525,1731576631633 2024-11-14T09:30:32,623 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:32,626 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T09:30:32,641 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/83f56b55f2af:0 server-side Connection retries=45 2024-11-14T09:30:32,641 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:30:32,641 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:30:32,641 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:30:32,641 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:30:32,641 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:30:32,641 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T09:30:32,641 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:30:32,642 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41383 2024-11-14T09:30:32,643 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41383 connecting to ZooKeeper ensemble=127.0.0.1:56059 2024-11-14T09:30:32,644 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:32,645 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:30:32,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:413830x0, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:30:32,650 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:413830x0, quorum=127.0.0.1:56059, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-14T09:30:32,650 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-14T09:30:32,650 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41383-0x10115d07bd00002 connected 2024-11-14T09:30:32,651 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T09:30:32,651 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T09:30:32,652 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:41383-0x10115d07bd00002, quorum=127.0.0.1:56059, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T09:30:32,653 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41383-0x10115d07bd00002, quorum=127.0.0.1:56059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:30:32,654 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41383 2024-11-14T09:30:32,654 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41383 2024-11-14T09:30:32,656 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41383 2024-11-14T09:30:32,658 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41383 2024-11-14T09:30:32,658 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41383 2024-11-14T09:30:32,659 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.HRegionServer(746): ClusterId : 363fd606-b134-4ef3-ad53-2f753a9ae3bd 2024-11-14T09:30:32,659 DEBUG [RS:1;83f56b55f2af:41383 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T09:30:32,665 DEBUG [RS:1;83f56b55f2af:41383 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T09:30:32,665 DEBUG [RS:1;83f56b55f2af:41383 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T09:30:32,667 DEBUG [RS:1;83f56b55f2af:41383 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T09:30:32,668 DEBUG [RS:1;83f56b55f2af:41383 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ee1d7f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83f56b55f2af/172.17.0.2:0 2024-11-14T09:30:32,680 DEBUG [RS:1;83f56b55f2af:41383 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;83f56b55f2af:41383 2024-11-14T09:30:32,680 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T09:30:32,680 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T09:30:32,680 DEBUG [RS:1;83f56b55f2af:41383 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-14T09:30:32,680 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.HRegionServer(2659): reportForDuty to master=83f56b55f2af,40525,1731576631633 with port=41383, startcode=1731576632641 2024-11-14T09:30:32,681 DEBUG [RS:1;83f56b55f2af:41383 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T09:30:32,682 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60881, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T09:30:32,683 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40525 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 83f56b55f2af,41383,1731576632641 2024-11-14T09:30:32,683 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40525 {}] master.ServerManager(517): Registering regionserver=83f56b55f2af,41383,1731576632641 2024-11-14T09:30:32,684 DEBUG [RS:1;83f56b55f2af:41383 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60 2024-11-14T09:30:32,684 DEBUG [RS:1;83f56b55f2af:41383 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36133 2024-11-14T09:30:32,684 DEBUG [RS:1;83f56b55f2af:41383 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T09:30:32,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:30:32,690 DEBUG [RS:1;83f56b55f2af:41383 {}] zookeeper.ZKUtil(111): regionserver:41383-0x10115d07bd00002, quorum=127.0.0.1:56059, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/83f56b55f2af,41383,1731576632641 2024-11-14T09:30:32,690 WARN [RS:1;83f56b55f2af:41383 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:30:32,691 INFO [RS:1;83f56b55f2af:41383 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:30:32,691 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [83f56b55f2af,41383,1731576632641] 2024-11-14T09:30:32,691 DEBUG [RS:1;83f56b55f2af:41383 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641 2024-11-14T09:30:32,694 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T09:30:32,696 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T09:30:32,696 INFO [RS:1;83f56b55f2af:41383 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:30:32,696 INFO [RS:1;83f56b55f2af:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-14T09:30:32,696 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T09:30:32,697 INFO [RS:1;83f56b55f2af:41383 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T09:30:32,697 INFO [RS:1;83f56b55f2af:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:32,698 DEBUG [RS:1;83f56b55f2af:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:32,698 DEBUG [RS:1;83f56b55f2af:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:32,698 DEBUG [RS:1;83f56b55f2af:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:32,698 DEBUG [RS:1;83f56b55f2af:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:32,698 DEBUG [RS:1;83f56b55f2af:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:32,698 DEBUG [RS:1;83f56b55f2af:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/83f56b55f2af:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:30:32,698 DEBUG [RS:1;83f56b55f2af:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:32,698 DEBUG [RS:1;83f56b55f2af:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:32,698 DEBUG [RS:1;83f56b55f2af:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:32,698 DEBUG [RS:1;83f56b55f2af:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:32,698 DEBUG [RS:1;83f56b55f2af:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:32,698 DEBUG [RS:1;83f56b55f2af:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:30:32,698 DEBUG [RS:1;83f56b55f2af:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/83f56b55f2af:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:30:32,698 DEBUG [RS:1;83f56b55f2af:41383 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:30:32,698 INFO [RS:1;83f56b55f2af:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-14T09:30:32,699 INFO [RS:1;83f56b55f2af:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:32,699 INFO [RS:1;83f56b55f2af:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:32,699 INFO [RS:1;83f56b55f2af:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:32,699 INFO [RS:1;83f56b55f2af:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:32,699 INFO [RS:1;83f56b55f2af:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,41383,1731576632641-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:30:32,713 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T09:30:32,713 INFO [RS:1;83f56b55f2af:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,41383,1731576632641-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:32,713 INFO [RS:1;83f56b55f2af:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:32,713 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.Replication(171): 83f56b55f2af,41383,1731576632641 started 2024-11-14T09:30:32,726 INFO [RS:1;83f56b55f2af:41383 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:30:32,726 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.HRegionServer(1482): Serving as 83f56b55f2af,41383,1731576632641, RpcServer on 83f56b55f2af/172.17.0.2:41383, sessionid=0x10115d07bd00002 2024-11-14T09:30:32,727 DEBUG [RS:1;83f56b55f2af:41383 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T09:30:32,727 DEBUG [RS:1;83f56b55f2af:41383 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 83f56b55f2af,41383,1731576632641 2024-11-14T09:30:32,727 DEBUG [RS:1;83f56b55f2af:41383 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83f56b55f2af,41383,1731576632641' 2024-11-14T09:30:32,727 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;83f56b55f2af:41383,5,FailOnTimeoutGroup] 2024-11-14T09:30:32,727 DEBUG [RS:1;83f56b55f2af:41383 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T09:30:32,727 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-14T09:30:32,727 DEBUG [RS:1;83f56b55f2af:41383 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T09:30:32,727 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T09:30:32,728 DEBUG [RS:1;83f56b55f2af:41383 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T09:30:32,728 DEBUG [RS:1;83f56b55f2af:41383 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T09:30:32,728 DEBUG [RS:1;83f56b55f2af:41383 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
83f56b55f2af,41383,1731576632641 2024-11-14T09:30:32,728 DEBUG [RS:1;83f56b55f2af:41383 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83f56b55f2af,41383,1731576632641' 2024-11-14T09:30:32,728 DEBUG [RS:1;83f56b55f2af:41383 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T09:30:32,728 DEBUG [RS:1;83f56b55f2af:41383 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T09:30:32,728 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 83f56b55f2af,40525,1731576631633 2024-11-14T09:30:32,728 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7e1dbd5b 2024-11-14T09:30:32,728 DEBUG [RS:1;83f56b55f2af:41383 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T09:30:32,728 INFO [RS:1;83f56b55f2af:41383 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T09:30:32,729 INFO [RS:1;83f56b55f2af:41383 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T09:30:32,729 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T09:30:32,731 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45258, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T09:30:32,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40525 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T09:30:32,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40525 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-14T09:30:32,732 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40525 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:30:32,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40525 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T09:30:32,735 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T09:30:32,735 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:32,735 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40525 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-14T09:30:32,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40525 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T09:30:32,736 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T09:30:32,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37737 is added to blk_1073741835_1011 (size=393) 2024-11-14T09:30:32,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46553 is added to blk_1073741835_1011 (size=393) 2024-11-14T09:30:32,745 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6901bf69dd23f7cb187b9800f3d62146, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60 2024-11-14T09:30:32,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37737 is added to blk_1073741836_1012 (size=76) 2024-11-14T09:30:32,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46553 is added to blk_1073741836_1012 (size=76) 2024-11-14T09:30:32,753 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:30:32,753 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 6901bf69dd23f7cb187b9800f3d62146, disabling compactions & flushes 2024-11-14T09:30:32,753 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. 2024-11-14T09:30:32,753 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. 2024-11-14T09:30:32,753 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. after waiting 0 ms 2024-11-14T09:30:32,753 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. 2024-11-14T09:30:32,753 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. 2024-11-14T09:30:32,753 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6901bf69dd23f7cb187b9800f3d62146: Waiting for close lock at 1731576632753Disabling compacts and flushes for region at 1731576632753Disabling writes for close at 1731576632753Writing region close event to WAL at 1731576632753Closed at 1731576632753 2024-11-14T09:30:32,755 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T09:30:32,755 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731576632755"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731576632755"}]},"ts":"1731576632755"} 2024-11-14T09:30:32,758 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
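The preceding entries show the master running CreateTableProcedure for TestLogRolling-testLogRollOnDatanodeDeath with a single 'info' column family (VERSIONS => '1', no compression, 64 KB blocks) and adding the new region to hbase:meta. For reference, a minimal client-side sketch that would issue an equivalent create request through the Admin API follows; the class name and connection boilerplate are illustrative assumptions, and only the table name and family settings are taken from the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableSketch {
      public static void main(String[] args) throws Exception {
        // Connection settings are read from hbase-site.xml on the classpath.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
          // One 'info' family, a single version, no compression -- mirroring the
          // descriptor printed by HMaster$4(2454) above.
          table.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .build());
          admin.createTable(table.build());
        }
      }
    }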
2024-11-14T09:30:32,759 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T09:30:32,759 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731576632759"}]},"ts":"1731576632759"} 2024-11-14T09:30:32,762 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-14T09:30:32,762 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6901bf69dd23f7cb187b9800f3d62146, ASSIGN}] 2024-11-14T09:30:32,763 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6901bf69dd23f7cb187b9800f3d62146, ASSIGN 2024-11-14T09:30:32,765 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6901bf69dd23f7cb187b9800f3d62146, ASSIGN; state=OFFLINE, location=83f56b55f2af,44811,1731576631688; forceNewPlan=false, retain=false 2024-11-14T09:30:32,831 INFO [RS:1;83f56b55f2af:41383 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C41383%2C1731576632641, suffix=, logDir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641, archiveDir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/oldWALs, maxLogs=32 2024-11-14T09:30:32,832 INFO [RS:1;83f56b55f2af:41383 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C41383%2C1731576632641.1731576632831 2024-11-14T09:30:32,838 INFO [RS:1;83f56b55f2af:41383 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 2024-11-14T09:30:32,838 DEBUG [RS:1;83f56b55f2af:41383 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45963:45963),(127.0.0.1/127.0.0.1:34151:34151)] 2024-11-14T09:30:32,915 INFO [83f56b55f2af:40525 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
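The WAL configuration entry above (blocksize=256 MB, rollsize=128 MB, maxLogs=32) describes when the newly started region server will roll its write-ahead log. A sketch of how such values are commonly supplied through the server Configuration is given below; the property names (hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs) are the usual ones in recent HBase releases but should be treated as an assumption to check against the version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSettingsSketch {
      public static Configuration walRollConf() {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size on HDFS; the log above reports 256 MB.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll once the WAL reaches blocksize * multiplier (256 MB * 0.5 = 128 MB).
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Cap the number of retained WAL files, matching maxLogs=32 in the log.
        conf.setInt("hbase.regionserver.maxlogs", 32);
        return conf;
      }
    }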
2024-11-14T09:30:32,916 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6901bf69dd23f7cb187b9800f3d62146, regionState=OPENING, regionLocation=83f56b55f2af,44811,1731576631688 2024-11-14T09:30:32,919 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6901bf69dd23f7cb187b9800f3d62146, ASSIGN because future has completed 2024-11-14T09:30:32,919 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6901bf69dd23f7cb187b9800f3d62146, server=83f56b55f2af,44811,1731576631688}] 2024-11-14T09:30:33,077 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. 2024-11-14T09:30:33,077 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 6901bf69dd23f7cb187b9800f3d62146, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:30:33,077 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:30:33,077 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:30:33,078 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:30:33,078 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:30:33,079 INFO [StoreOpener-6901bf69dd23f7cb187b9800f3d62146-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:30:33,080 INFO [StoreOpener-6901bf69dd23f7cb187b9800f3d62146-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6901bf69dd23f7cb187b9800f3d62146 columnFamilyName info 2024-11-14T09:30:33,080 DEBUG [StoreOpener-6901bf69dd23f7cb187b9800f3d62146-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:30:33,081 INFO [StoreOpener-6901bf69dd23f7cb187b9800f3d62146-1 {}] regionserver.HStore(327): Store=6901bf69dd23f7cb187b9800f3d62146/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:30:33,081 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:30:33,082 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:30:33,082 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:30:33,083 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:30:33,083 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:30:33,084 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:30:33,086 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:30:33,087 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 6901bf69dd23f7cb187b9800f3d62146; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696802, jitterRate=-0.11397075653076172}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T09:30:33,087 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:30:33,087 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 6901bf69dd23f7cb187b9800f3d62146: Running coprocessor pre-open hook at 1731576633078Writing region info on filesystem at 1731576633078Initializing all the Stores at 1731576633079 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576633079Cleaning up temporary data from old regions at 1731576633083 (+4 ms)Running coprocessor post-open hooks at 1731576633087 (+4 ms)Region opened successfully at 1731576633087 2024-11-14T09:30:33,088 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146., pid=6, masterSystemTime=1731576633073 2024-11-14T09:30:33,091 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. 2024-11-14T09:30:33,091 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. 2024-11-14T09:30:33,092 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6901bf69dd23f7cb187b9800f3d62146, regionState=OPEN, openSeqNum=2, regionLocation=83f56b55f2af,44811,1731576631688 2024-11-14T09:30:33,094 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6901bf69dd23f7cb187b9800f3d62146, server=83f56b55f2af,44811,1731576631688 because future has completed 2024-11-14T09:30:33,098 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T09:30:33,098 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 6901bf69dd23f7cb187b9800f3d62146, server=83f56b55f2af,44811,1731576631688 in 177 msec 2024-11-14T09:30:33,101 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T09:30:33,101 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6901bf69dd23f7cb187b9800f3d62146, ASSIGN in 336 msec 2024-11-14T09:30:33,102 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T09:30:33,103 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731576633103"}]},"ts":"1731576633103"} 2024-11-14T09:30:33,105 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-14T09:30:33,106 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T09:30:33,108 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 374 msec 2024-11-14T09:30:37,298 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:30:37,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:30:37,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:30:37,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:30:37,315 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:30:37,972 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-14T09:30:41,793 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T09:30:41,794 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T09:30:41,795 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T09:30:41,795 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-14T09:30:41,795 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:30:41,795 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T09:30:41,795 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T09:30:41,795 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-14T09:30:42,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40525 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T09:30:42,804 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: 
CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-14T09:30:42,804 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-14T09:30:42,809 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T09:30:42,809 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. 2024-11-14T09:30:42,839 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:30:42,843 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:30:42,844 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:30:42,845 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:30:42,845 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:30:42,845 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6134c032{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:30:42,845 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d9228f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:30:42,971 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a884c76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/java.io.tmpdir/jetty-localhost-44425-hadoop-hdfs-3_4_1-tests_jar-_-any-15310747494708715513/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:30:42,972 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@32dff706{HTTP/1.1, (http/1.1)}{localhost:44425} 2024-11-14T09:30:42,972 INFO [Time-limited test {}] server.Server(415): Started @117158ms 2024-11-14T09:30:42,973 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:30:43,012 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:30:43,015 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:30:43,015 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:30:43,015 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:30:43,016 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:30:43,016 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@522c0677{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:30:43,016 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c1f678{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:30:43,105 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data6/current/BP-2105580279-172.17.0.2-1731576630823/current, will proceed with Du for space computation calculation, 2024-11-14T09:30:43,105 WARN [Thread-827 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data5/current/BP-2105580279-172.17.0.2-1731576630823/current, will proceed with Du for space computation calculation, 2024-11-14T09:30:43,131 WARN [Thread-807 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:30:43,134 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd9f91bda2f8fe243 with lease ID 0xe55f664660dab17: Processing first storage report for DS-c2603025-b29b-4678-b265-b194df3ecbcd from datanode DatanodeRegistration(127.0.0.1:37063, datanodeUuid=df6d81db-1c2c-407c-840a-6291d4fb7417, infoPort=41223, infoSecurePort=0, ipcPort=36993, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823) 2024-11-14T09:30:43,134 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd9f91bda2f8fe243 with lease ID 0xe55f664660dab17: from storage DS-c2603025-b29b-4678-b265-b194df3ecbcd node DatanodeRegistration(127.0.0.1:37063, datanodeUuid=df6d81db-1c2c-407c-840a-6291d4fb7417, infoPort=41223, infoSecurePort=0, ipcPort=36993, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:30:43,134 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd9f91bda2f8fe243 with lease ID 0xe55f664660dab17: Processing first storage report for DS-3c908471-9d49-4cf5-a8a0-02d1d9906b47 from datanode DatanodeRegistration(127.0.0.1:37063, datanodeUuid=df6d81db-1c2c-407c-840a-6291d4fb7417, infoPort=41223, infoSecurePort=0, ipcPort=36993, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823) 2024-11-14T09:30:43,134 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd9f91bda2f8fe243 with lease ID 0xe55f664660dab17: from storage DS-3c908471-9d49-4cf5-a8a0-02d1d9906b47 node DatanodeRegistration(127.0.0.1:37063, datanodeUuid=df6d81db-1c2c-407c-840a-6291d4fb7417, infoPort=41223, infoSecurePort=0, ipcPort=36993, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:30:43,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1d6f136d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/java.io.tmpdir/jetty-localhost-44143-hadoop-hdfs-3_4_1-tests_jar-_-any-935246619357158744/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:30:43,175 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d0b72b6{HTTP/1.1, (http/1.1)}{localhost:44143} 2024-11-14T09:30:43,175 INFO [Time-limited test {}] server.Server(415): Started @117361ms 2024-11-14T09:30:43,177 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:30:43,282 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:30:43,292 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:30:43,295 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:30:43,295 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:30:43,295 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:30:43,297 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15717f84{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:30:43,297 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4067923b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:30:43,334 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data7/current/BP-2105580279-172.17.0.2-1731576630823/current, will proceed with Du for space computation calculation, 2024-11-14T09:30:43,334 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data8/current/BP-2105580279-172.17.0.2-1731576630823/current, will proceed with Du for space computation calculation, 2024-11-14T09:30:43,362 WARN [Thread-842 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:30:43,367 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfbd80d7342787a44 with lease ID 0xe55f664660dab18: Processing first storage report for DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02 from datanode DatanodeRegistration(127.0.0.1:42393, datanodeUuid=8d91c2d0-a1ea-4db7-bada-85ae4e8ce831, infoPort=34607, infoSecurePort=0, ipcPort=45647, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823) 2024-11-14T09:30:43,367 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfbd80d7342787a44 with lease ID 0xe55f664660dab18: from storage DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02 node DatanodeRegistration(127.0.0.1:42393, datanodeUuid=8d91c2d0-a1ea-4db7-bada-85ae4e8ce831, infoPort=34607, infoSecurePort=0, ipcPort=45647, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:30:43,367 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfbd80d7342787a44 with lease ID 0xe55f664660dab18: Processing first storage report for DS-1cbf4e0c-f285-44c4-b016-67ca7266def2 from datanode DatanodeRegistration(127.0.0.1:42393, datanodeUuid=8d91c2d0-a1ea-4db7-bada-85ae4e8ce831, infoPort=34607, infoSecurePort=0, ipcPort=45647, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823) 2024-11-14T09:30:43,368 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfbd80d7342787a44 with lease ID 0xe55f664660dab18: from storage DS-1cbf4e0c-f285-44c4-b016-67ca7266def2 node DatanodeRegistration(127.0.0.1:42393, datanodeUuid=8d91c2d0-a1ea-4db7-bada-85ae4e8ce831, infoPort=34607, infoSecurePort=0, ipcPort=45647, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:30:43,457 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f59c31d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/java.io.tmpdir/jetty-localhost-34571-hadoop-hdfs-3_4_1-tests_jar-_-any-15105677395020419929/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:30:43,458 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4e5cd346{HTTP/1.1, (http/1.1)}{localhost:34571} 2024-11-14T09:30:43,458 INFO [Time-limited test {}] server.Server(415): Started @117644ms 2024-11-14T09:30:43,460 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T09:30:43,553 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data9/current/BP-2105580279-172.17.0.2-1731576630823/current, will proceed with Du for space computation calculation, 2024-11-14T09:30:43,554 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data10/current/BP-2105580279-172.17.0.2-1731576630823/current, will proceed with Du for space computation calculation, 2024-11-14T09:30:43,571 WARN [Thread-877 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:30:43,573 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x79d848b34acb977c with lease ID 0xe55f664660dab19: Processing first storage report for DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d from datanode DatanodeRegistration(127.0.0.1:33547, datanodeUuid=681bb4f6-986c-4484-806b-6e345c1f621c, infoPort=35991, infoSecurePort=0, ipcPort=43145, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823) 2024-11-14T09:30:43,573 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x79d848b34acb977c with lease ID 0xe55f664660dab19: from storage DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d node DatanodeRegistration(127.0.0.1:33547, datanodeUuid=681bb4f6-986c-4484-806b-6e345c1f621c, infoPort=35991, infoSecurePort=0, ipcPort=43145, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:30:43,573 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x79d848b34acb977c with lease ID 0xe55f664660dab19: Processing first storage report for DS-6de96ad9-65fa-487c-bf30-80f7e184b830 from datanode DatanodeRegistration(127.0.0.1:33547, datanodeUuid=681bb4f6-986c-4484-806b-6e345c1f621c, infoPort=35991, infoSecurePort=0, ipcPort=43145, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823) 2024-11-14T09:30:43,573 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x79d848b34acb977c with lease ID 0xe55f664660dab19: from storage DS-6de96ad9-65fa-487c-bf30-80f7e184b830 node DatanodeRegistration(127.0.0.1:33547, datanodeUuid=681bb4f6-986c-4484-806b-6e345c1f621c, infoPort=35991, infoSecurePort=0, ipcPort=43145, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:30:43,586 WARN [ResponseProcessor for block BP-2105580279-172.17.0.2-1731576630823:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2105580279-172.17.0.2-1731576630823:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:43,586 WARN [ResponseProcessor for block BP-2105580279-172.17.0.2-1731576630823:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2105580279-172.17.0.2-1731576630823:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:43,586 WARN [ResponseProcessor for block BP-2105580279-172.17.0.2-1731576630823:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2105580279-172.17.0.2-1731576630823:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-2105580279-172.17.0.2-1731576630823:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:43,587 WARN [DataStreamer for file /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/WALs/83f56b55f2af,40525,1731576631633/83f56b55f2af%2C40525%2C1731576631633.1731576631798 block BP-2105580279-172.17.0.2-1731576630823:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK], DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]) is bad. 2024-11-14T09:30:43,587 WARN [ResponseProcessor for block BP-2105580279-172.17.0.2-1731576630823:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2105580279-172.17.0.2-1731576630823:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-2105580279-172.17.0.2-1731576630823:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:43,587 WARN [DataStreamer for file /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 block BP-2105580279-172.17.0.2-1731576630823:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK], DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]) is bad. 
2024-11-14T09:30:43,587 WARN [PacketResponder: BP-2105580279-172.17.0.2-1731576630823:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46553] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:43,587 WARN [DataStreamer for file /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576632110 block BP-2105580279-172.17.0.2-1731576630823:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK], DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]) is bad. 2024-11-14T09:30:43,588 WARN [DataStreamer for file /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta block BP-2105580279-172.17.0.2-1731576630823:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK], DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]) is bad. 2024-11-14T09:30:43,587 WARN [PacketResponder: BP-2105580279-172.17.0.2-1731576630823:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46553] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:43,589 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1558881060_22 at /127.0.0.1:40930 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40930 dst: /127.0.0.1:46553 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:43,589 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6a102770{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:30:43,590 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:40044 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37737:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40044 dst: /127.0.0.1:37737 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:43,590 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@385db9bf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:30:43,590 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:30:43,590 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1464530290_22 at /127.0.0.1:40076 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:37737:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40076 dst: /127.0.0.1:37737 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:43,590 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:40958 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40958 dst: /127.0.0.1:46553 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:30:43,591 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30985369{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:30:43,591 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c3714ef{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.log.dir/,STOPPED} 2024-11-14T09:30:43,590 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1558881060_22 at /127.0.0.1:40012 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37737:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40012 dst: /127.0.0.1:37737 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:43,590 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1464530290_22 at /127.0.0.1:41006 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:46553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41006 dst: /127.0.0.1:46553 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:43,591 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:40970 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46553:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40970 dst: /127.0.0.1:46553 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:43,592 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:40060 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37737:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40060 dst: /127.0.0.1:37737 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:43,593 WARN [BP-2105580279-172.17.0.2-1731576630823 heartbeating to localhost/127.0.0.1:36133 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:30:43,593 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:30:43,593 WARN [BP-2105580279-172.17.0.2-1731576630823 heartbeating to localhost/127.0.0.1:36133 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2105580279-172.17.0.2-1731576630823 (Datanode Uuid 340cd0fe-f1d0-4fc4-8a97-207897c75a92) service to localhost/127.0.0.1:36133 2024-11-14T09:30:43,593 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:30:43,594 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data3/current/BP-2105580279-172.17.0.2-1731576630823 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:30:43,594 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:30:43,595 WARN [DataStreamer for file /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576632110 block BP-2105580279-172.17.0.2-1731576630823:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:43,595 WARN [DataStreamer for file /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/WALs/83f56b55f2af,40525,1731576631633/83f56b55f2af%2C40525%2C1731576631633.1731576631798 block BP-2105580279-172.17.0.2-1731576630823:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] 
at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:43,595 WARN [DataStreamer for file /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta block BP-2105580279-172.17.0.2-1731576630823:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:43,595 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data4/current/BP-2105580279-172.17.0.2-1731576630823 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:30:43,596 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@6c53b989 {}] datanode.DataXceiver(331): 127.0.0.1:37737:DataXceiver error processing unknown operation src: /127.0.0.1:45064 dst: /127.0.0.1:37737 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:43,596 WARN [DataStreamer for file /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 block BP-2105580279-172.17.0.2-1731576630823:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:43,597 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2c46fc61{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:30:43,598 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1173af39{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:30:43,598 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:30:43,598 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@778fdefb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:30:43,598 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72770c41{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.log.dir/,STOPPED} 2024-11-14T09:30:43,599 WARN [BP-2105580279-172.17.0.2-1731576630823 heartbeating to localhost/127.0.0.1:36133 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:30:43,599 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T09:30:43,599 WARN [BP-2105580279-172.17.0.2-1731576630823 heartbeating to localhost/127.0.0.1:36133 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2105580279-172.17.0.2-1731576630823 (Datanode Uuid 155aa341-5430-4b94-8a29-880e06e197a9) service to localhost/127.0.0.1:36133 2024-11-14T09:30:43,599 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:30:43,600 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data1/current/BP-2105580279-172.17.0.2-1731576630823 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:30:43,600 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data2/current/BP-2105580279-172.17.0.2-1731576630823 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:30:43,600 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:30:43,603 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146., hostname=83f56b55f2af,44811,1731576631688, seqNum=2] 2024-11-14T09:30:43,605 ERROR [FSHLog-0-hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60-prefix:83f56b55f2af,44811,1731576631688 {}] wal.AbstractFSWAL(1838): appendAndSync 
throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:43,605 WARN [FSHLog-0-hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60-prefix:83f56b55f2af,44811,1731576631688 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:43,605 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:43,605 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 83f56b55f2af%2C44811%2C1731576631688:(num 1731576632110) roll requested 2024-11-14T09:30:43,605 INFO [regionserver/83f56b55f2af:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44811%2C1731576631688.1731576643605 2024-11-14T09:30:43,614 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:43,614 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:43,614 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:43,614 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:43,614 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:43,614 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576632110 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576643605 2024-11-14T09:30:43,615 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:43,615 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:43,616 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-14T09:30:43,617 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-14T09:30:43,617 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576632110 2024-11-14T09:30:43,618 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35991:35991),(127.0.0.1/127.0.0.1:41223:41223)] 2024-11-14T09:30:43,618 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576632110 is not closed yet, will try archiving it next time 2024-11-14T09:30:43,620 WARN [IPC Server handler 1 on default port 36133 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576632110 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-14T09:30:43,623 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576632110 after 5ms 2024-11-14T09:30:43,774 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:44,699 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:45,618 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:45,620 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576643605 2024-11-14T09:30:45,620 WARN [ResponseProcessor for block BP-2105580279-172.17.0.2-1731576630823:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2105580279-172.17.0.2-1731576630823:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:45,621 WARN [DataStreamer for file /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576643605 block BP-2105580279-172.17.0.2-1731576630823:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK], DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]) is bad. 2024-11-14T09:30:45,621 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:44010 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33547:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44010 dst: /127.0.0.1:33547 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:45,622 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:56026 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:37063:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56026 dst: /127.0.0.1:37063 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:30:45,624 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f59c31d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:30:45,624 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4e5cd346{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:30:45,624 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:30:45,624 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4067923b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:30:45,625 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15717f84{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.log.dir/,STOPPED} 2024-11-14T09:30:45,626 WARN [BP-2105580279-172.17.0.2-1731576630823 heartbeating to localhost/127.0.0.1:36133 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:30:45,626 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T09:30:45,626 WARN [BP-2105580279-172.17.0.2-1731576630823 heartbeating to localhost/127.0.0.1:36133 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2105580279-172.17.0.2-1731576630823 (Datanode Uuid 681bb4f6-986c-4484-806b-6e345c1f621c) service to localhost/127.0.0.1:36133 2024-11-14T09:30:45,626 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:30:45,627 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data9/current/BP-2105580279-172.17.0.2-1731576630823 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:30:45,627 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data10/current/BP-2105580279-172.17.0.2-1731576630823 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:30:45,627 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:30:45,774 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:46,700 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:47,619 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:47,619 WARN [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]] 2024-11-14T09:30:47,620 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 83f56b55f2af%2C44811%2C1731576631688:(num 1731576643605) roll requested 2024-11-14T09:30:47,620 INFO [regionserver/83f56b55f2af:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44811%2C1731576631688.1731576647620 2024-11-14T09:30:47,624 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33547 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:47,624 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:51042 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data8]'}, localName='127.0.0.1:42393', datanodeUuid='8d91c2d0-a1ea-4db7-bada-85ae4e8ce831', xmitsInProgress=0}:Exception transferring block BP-2105580279-172.17.0.2-1731576630823:blk_1073741839_1021 to mirror 127.0.0.1:33547 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:47,624 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576632110 after 4007ms 2024-11-14T09:30:47,624 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK], DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]) is bad. 2024-11-14T09:30:47,624 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741839_1021 2024-11-14T09:30:47,624 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:51042 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-14T09:30:47,625 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:51042 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42393:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51042 dst: /127.0.0.1:42393 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:47,627 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK] 2024-11-14T09:30:47,630 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:47,630 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK], DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]) is bad. 
2024-11-14T09:30:47,630 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741840_1022 2024-11-14T09:30:47,630 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK] 2024-11-14T09:30:47,632 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37737 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:47,632 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:56048 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data6]'}, localName='127.0.0.1:37063', datanodeUuid='df6d81db-1c2c-407c-840a-6291d4fb7417', xmitsInProgress=0}:Exception transferring block BP-2105580279-172.17.0.2-1731576630823:blk_1073741841_1023 to mirror 127.0.0.1:37737 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:47,632 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK], DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]) is bad. 
2024-11-14T09:30:47,632 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741841_1023 2024-11-14T09:30:47,632 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:56048 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T09:30:47,632 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:56048 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:37063:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56048 dst: /127.0.0.1:37063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:30:47,633 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK] 2024-11-14T09:30:47,635 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T09:30:47,637 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:47,637 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:47,637 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:47,637 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:47,637 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:47,637 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576643605 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576647620 2024-11-14T09:30:47,638 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41223:41223),(127.0.0.1/127.0.0.1:34607:34607)] 2024-11-14T09:30:47,638 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576632110 is not closed yet, will try archiving it next time 2024-11-14T09:30:47,638 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576643605 is not closed yet, will try archiving it next time 2024-11-14T09:30:47,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37063 is added to blk_1073741838_1020 (size=2431) 2024-11-14T09:30:47,775 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:48,040 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576632110 is not closed yet, will try archiving it next time 2024-11-14T09:30:48,700 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:49,146 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3671afbf[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37063, datanodeUuid=df6d81db-1c2c-407c-840a-6291d4fb7417, infoPort=41223, infoSecurePort=0, ipcPort=36993, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823):Failed to transfer BP-2105580279-172.17.0.2-1731576630823:blk_1073741838_1020 to 127.0.0.1:46553 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:49,638 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:49,640 WARN [ResponseProcessor for block BP-2105580279-172.17.0.2-1731576630823:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2105580279-172.17.0.2-1731576630823:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:49,640 WARN [DataStreamer for file /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576647620 block BP-2105580279-172.17.0.2-1731576630823:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]) is bad. 2024-11-14T09:30:49,641 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:56058 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:37063:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56058 dst: /127.0.0.1:37063 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:30:49,641 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:51050 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:42393:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51050 dst: /127.0.0.1:42393 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:49,643 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a884c76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:30:49,643 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@32dff706{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:30:49,643 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:30:49,643 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d9228f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:30:49,643 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6134c032{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.log.dir/,STOPPED} 2024-11-14T09:30:49,645 WARN [BP-2105580279-172.17.0.2-1731576630823 heartbeating to localhost/127.0.0.1:36133 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:30:49,645 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:30:49,645 WARN [BP-2105580279-172.17.0.2-1731576630823 heartbeating to localhost/127.0.0.1:36133 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2105580279-172.17.0.2-1731576630823 (Datanode Uuid df6d81db-1c2c-407c-840a-6291d4fb7417) service to localhost/127.0.0.1:36133 2024-11-14T09:30:49,645 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:30:49,645 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data5/current/BP-2105580279-172.17.0.2-1731576630823 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:30:49,645 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data6/current/BP-2105580279-172.17.0.2-1731576630823 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:30:49,646 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:30:49,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44811 {}] regionserver.HRegion(8855): Flush requested on 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:30:49,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6901bf69dd23f7cb187b9800f3d62146 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:30:49,676 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/e030a042cda1436ebb28492b5588b32e is 1080, key is row0002/info:/1731576645629/Put/seqid=0 2024-11-14T09:30:49,678 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:49,678 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK], DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]) is bad. 2024-11-14T09:30:49,678 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741843_1026 2024-11-14T09:30:49,678 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK] 2024-11-14T09:30:49,680 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:49,680 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK], DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]) is bad. 2024-11-14T09:30:49,680 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741844_1027 2024-11-14T09:30:49,681 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK] 2024-11-14T09:30:49,682 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:49,682 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK], DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]) is bad. 2024-11-14T09:30:49,682 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741845_1028 2024-11-14T09:30:49,683 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK] 2024-11-14T09:30:49,684 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:49,684 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]) is bad. 
2024-11-14T09:30:49,684 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741846_1029 2024-11-14T09:30:49,685 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK] 2024-11-14T09:30:49,685 WARN [IPC Server handler 0 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:30:49,686 WARN [IPC Server handler 0 on default port 36133 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:30:49,686 WARN [IPC Server handler 0 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:30:49,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741847_1030 (size=10347) 2024-11-14T09:30:49,775 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:50,090 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/e030a042cda1436ebb28492b5588b32e 2024-11-14T09:30:50,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/e030a042cda1436ebb28492b5588b32e as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/e030a042cda1436ebb28492b5588b32e 2024-11-14T09:30:50,105 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/e030a042cda1436ebb28492b5588b32e, entries=5, sequenceid=11, filesize=10.1 K 2024-11-14T09:30:50,106 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 6901bf69dd23f7cb187b9800f3d62146 in 452ms, sequenceid=11, compaction requested=false 2024-11-14T09:30:50,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6901bf69dd23f7cb187b9800f3d62146: 2024-11-14T09:30:50,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44811 {}] regionserver.HRegion(8855): Flush requested on 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:30:50,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6901bf69dd23f7cb187b9800f3d62146 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-14T09:30:50,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/3cbf54693eb8475d883341abf8e9d075 is 1080, key is row0007/info:/1731576649655/Put/seqid=0 2024-11-14T09:30:50,283 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:50,284 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK], DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]) is bad. 2024-11-14T09:30:50,284 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741848_1031 2024-11-14T09:30:50,284 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK] 2024-11-14T09:30:50,285 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:50,286 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK], DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]) is bad. 2024-11-14T09:30:50,286 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741849_1032 2024-11-14T09:30:50,286 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK] 2024-11-14T09:30:50,287 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:50,287 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK], DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]) is bad. 2024-11-14T09:30:50,287 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741850_1033 2024-11-14T09:30:50,288 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK] 2024-11-14T09:30:50,290 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37737 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:50,290 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:51088 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data8]'}, localName='127.0.0.1:42393', datanodeUuid='8d91c2d0-a1ea-4db7-bada-85ae4e8ce831', xmitsInProgress=0}:Exception transferring block BP-2105580279-172.17.0.2-1731576630823:blk_1073741851_1034 to mirror 127.0.0.1:37737 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:50,290 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK], DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]) is bad. 2024-11-14T09:30:50,290 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741851_1034 2024-11-14T09:30:50,290 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:51088 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:30:50,290 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:51088 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:42393:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51088 dst: /127.0.0.1:42393 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:30:50,290 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK] 2024-11-14T09:30:50,291 WARN [IPC Server handler 0 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:30:50,291 WARN [IPC Server handler 0 on default port 36133 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:30:50,291 WARN [IPC Server handler 0 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:30:50,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741852_1035 (size=12506) 2024-11-14T09:30:50,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/3cbf54693eb8475d883341abf8e9d075 2024-11-14T09:30:50,700 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:50,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/3cbf54693eb8475d883341abf8e9d075 as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/3cbf54693eb8475d883341abf8e9d075 2024-11-14T09:30:50,712 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/3cbf54693eb8475d883341abf8e9d075, entries=7, sequenceid=24, filesize=12.2 K 2024-11-14T09:30:50,713 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 6901bf69dd23f7cb187b9800f3d62146 in 436ms, sequenceid=24, compaction requested=false 2024-11-14T09:30:50,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6901bf69dd23f7cb187b9800f3d62146: 2024-11-14T09:30:50,714 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-14T09:30:50,714 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:30:50,714 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/3cbf54693eb8475d883341abf8e9d075 because midkey is the same as first or last row 2024-11-14T09:30:51,639 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:51,639 WARN [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]] 2024-11-14T09:30:51,639 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 83f56b55f2af%2C44811%2C1731576631688:(num 1731576647620) roll requested 2024-11-14T09:30:51,640 INFO [regionserver/83f56b55f2af:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44811%2C1731576631688.1731576651639 2024-11-14T09:30:51,643 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:51,643 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK], DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]) is bad. 2024-11-14T09:30:51,643 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741853_1036 2024-11-14T09:30:51,644 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK] 2024-11-14T09:30:51,645 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:51,645 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK], DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]) is bad. 2024-11-14T09:30:51,645 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741854_1037 2024-11-14T09:30:51,646 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK] 2024-11-14T09:30:51,647 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:51,647 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]) is bad. 2024-11-14T09:30:51,647 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741855_1038 2024-11-14T09:30:51,647 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK] 2024-11-14T09:30:51,649 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46553 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:51,649 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK], DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]) is bad. 2024-11-14T09:30:51,649 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741856_1039 2024-11-14T09:30:51,649 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:35430 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data8]'}, localName='127.0.0.1:42393', datanodeUuid='8d91c2d0-a1ea-4db7-bada-85ae4e8ce831', xmitsInProgress=0}:Exception transferring block BP-2105580279-172.17.0.2-1731576630823:blk_1073741856_1039 to mirror 127.0.0.1:46553 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:51,650 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:35430 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T09:30:51,650 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:35430 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:42393:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35430 dst: /127.0.0.1:42393 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:51,650 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK] 2024-11-14T09:30:51,651 WARN [IPC Server handler 1 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:30:51,651 WARN [IPC Server handler 1 on default port 36133 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:30:51,651 WARN [IPC Server handler 1 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:30:51,654 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:51,654 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:51,654 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:51,654 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:51,654 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:51,655 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576647620 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576651639 2024-11-14T09:30:51,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741842_1025 (size=25992) 2024-11-14T09:30:51,659 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34607:34607)] 2024-11-14T09:30:51,659 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576632110 is not closed yet, will try archiving it next time 2024-11-14T09:30:51,659 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576647620 is not closed yet, will try archiving it next time 2024-11-14T09:30:51,659 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576643605 to hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/oldWALs/83f56b55f2af%2C44811%2C1731576631688.1731576643605 2024-11-14T09:30:51,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44811 {}] regionserver.HRegion(8855): Flush requested on 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:30:51,695 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6901bf69dd23f7cb187b9800f3d62146 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T09:30:51,701 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/fc0dabbfb9174dff89a6676ee66aa0a2 is 1079, key is tmprow/info:/1731576651694/Put/seqid=0 2024-11-14T09:30:51,704 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:51,704 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]) is bad. 
2024-11-14T09:30:51,704 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741858_1041 2024-11-14T09:30:51,704 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK] 2024-11-14T09:30:51,706 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:51,706 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK], DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]) is bad. 2024-11-14T09:30:51,706 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741859_1042 2024-11-14T09:30:51,706 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK] 2024-11-14T09:30:51,707 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:51,707 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK], DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]) is bad. 2024-11-14T09:30:51,707 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741860_1043 2024-11-14T09:30:51,708 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK] 2024-11-14T09:30:51,709 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:51,709 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]) is bad. 
2024-11-14T09:30:51,709 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741861_1044 2024-11-14T09:30:51,710 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK] 2024-11-14T09:30:51,710 WARN [IPC Server handler 0 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:30:51,710 WARN [IPC Server handler 0 on default port 36133 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:30:51,711 WARN [IPC Server handler 0 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:30:51,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741862_1045 (size=6027) 2024-11-14T09:30:51,775 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:52,060 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576632110 is not closed yet, will try archiving it next time 2024-11-14T09:30:52,115 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/fc0dabbfb9174dff89a6676ee66aa0a2 2024-11-14T09:30:52,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/fc0dabbfb9174dff89a6676ee66aa0a2 as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/fc0dabbfb9174dff89a6676ee66aa0a2 2024-11-14T09:30:52,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/fc0dabbfb9174dff89a6676ee66aa0a2, entries=1, sequenceid=34, filesize=5.9 K 2024-11-14T09:30:52,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 6901bf69dd23f7cb187b9800f3d62146 in 432ms, sequenceid=34, compaction requested=true 2024-11-14T09:30:52,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6901bf69dd23f7cb187b9800f3d62146: 2024-11-14T09:30:52,128 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-14T09:30:52,128 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:30:52,128 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/3cbf54693eb8475d883341abf8e9d075 because midkey is the same as first or last row 2024-11-14T09:30:52,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6901bf69dd23f7cb187b9800f3d62146:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:30:52,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:30:52,128 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:30:52,129 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:30:52,129 DEBUG 
[RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HStore(1541): 6901bf69dd23f7cb187b9800f3d62146/info is initiating minor compaction (all files) 2024-11-14T09:30:52,130 INFO [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6901bf69dd23f7cb187b9800f3d62146/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. 2024-11-14T09:30:52,130 INFO [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/e030a042cda1436ebb28492b5588b32e, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/3cbf54693eb8475d883341abf8e9d075, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/fc0dabbfb9174dff89a6676ee66aa0a2] into tmpdir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp, totalSize=28.2 K 2024-11-14T09:30:52,130 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] compactions.Compactor(225): Compacting e030a042cda1436ebb28492b5588b32e, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731576645629 2024-11-14T09:30:52,130 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3cbf54693eb8475d883341abf8e9d075, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731576649655 2024-11-14T09:30:52,131 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] compactions.Compactor(225): Compacting fc0dabbfb9174dff89a6676ee66aa0a2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731576651694 2024-11-14T09:30:52,144 INFO [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6901bf69dd23f7cb187b9800f3d62146#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:30:52,144 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/11c5d17733b24d30bcc63bc432b3bffe is 1080, key is row0002/info:/1731576645629/Put/seqid=0 2024-11-14T09:30:52,146 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:52,146 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]) is bad. 2024-11-14T09:30:52,146 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741863_1046 2024-11-14T09:30:52,147 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK] 2024-11-14T09:30:52,148 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:52,148 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK], DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]) is bad. 2024-11-14T09:30:52,148 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741864_1047 2024-11-14T09:30:52,148 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK] 2024-11-14T09:30:52,150 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33547 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:52,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:35476 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data8]'}, localName='127.0.0.1:42393', datanodeUuid='8d91c2d0-a1ea-4db7-bada-85ae4e8ce831', xmitsInProgress=0}:Exception transferring block BP-2105580279-172.17.0.2-1731576630823:blk_1073741865_1048 to mirror 127.0.0.1:33547 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:52,150 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK], DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]) is bad. 2024-11-14T09:30:52,150 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741865_1048 2024-11-14T09:30:52,150 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:35476 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:30:52,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:35476 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:42393:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35476 dst: /127.0.0.1:42393 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:52,151 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK] 2024-11-14T09:30:52,152 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:52,152 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]) is bad. 
2024-11-14T09:30:52,152 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741866_1049 2024-11-14T09:30:52,152 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK] 2024-11-14T09:30:52,153 WARN [IPC Server handler 4 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:30:52,153 WARN [IPC Server handler 4 on default port 36133 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:30:52,153 WARN [IPC Server handler 4 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:30:52,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741867_1050 (size=17994) 2024-11-14T09:30:52,371 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@23a35d00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42393, datanodeUuid=8d91c2d0-a1ea-4db7-bada-85ae4e8ce831, infoPort=34607, infoSecurePort=0, ipcPort=45647, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823):Failed to transfer BP-2105580279-172.17.0.2-1731576630823:blk_1073741847_1030 to 127.0.0.1:37063 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:30:52,372 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@279ccdac[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42393, datanodeUuid=8d91c2d0-a1ea-4db7-bada-85ae4e8ce831, infoPort=34607, infoSecurePort=0, ipcPort=45647, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823):Failed to transfer BP-2105580279-172.17.0.2-1731576630823:blk_1073741852_1035 to 127.0.0.1:46553 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:52,563 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/11c5d17733b24d30bcc63bc432b3bffe as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/11c5d17733b24d30bcc63bc432b3bffe 2024-11-14T09:30:52,570 INFO [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6901bf69dd23f7cb187b9800f3d62146/info of 6901bf69dd23f7cb187b9800f3d62146 into 11c5d17733b24d30bcc63bc432b3bffe(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T09:30:52,571 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6901bf69dd23f7cb187b9800f3d62146: 2024-11-14T09:30:52,571 INFO [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146., storeName=6901bf69dd23f7cb187b9800f3d62146/info, priority=13, startTime=1731576652128; duration=0sec 2024-11-14T09:30:52,571 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-14T09:30:52,571 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:30:52,571 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/11c5d17733b24d30bcc63bc432b3bffe because midkey is the same as first or last row 2024-11-14T09:30:52,571 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-14T09:30:52,571 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:30:52,571 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/11c5d17733b24d30bcc63bc432b3bffe because midkey is the same as first or last row 2024-11-14T09:30:52,571 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-14T09:30:52,571 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:30:52,572 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/11c5d17733b24d30bcc63bc432b3bffe because midkey is the same as first or last row 2024-11-14T09:30:52,572 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:30:52,572 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6901bf69dd23f7cb187b9800f3d62146:info 2024-11-14T09:30:52,701 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:53,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44811 {}] regionserver.HRegion(8855): Flush requested on 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:30:53,112 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6901bf69dd23f7cb187b9800f3d62146 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T09:30:53,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/45e66f42f05940fb8a2564a8c7886e27 is 1079, key is tmprow/info:/1731576653111/Put/seqid=0 2024-11-14T09:30:53,119 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37063 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:53,119 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:35496 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data8]'}, localName='127.0.0.1:42393', datanodeUuid='8d91c2d0-a1ea-4db7-bada-85ae4e8ce831', xmitsInProgress=0}:Exception transferring block BP-2105580279-172.17.0.2-1731576630823:blk_1073741868_1051 to mirror 127.0.0.1:37063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:53,119 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK], DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]) is bad. 2024-11-14T09:30:53,119 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741868_1051 2024-11-14T09:30:53,119 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:35496 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:30:53,119 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:35496 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:42393:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35496 dst: /127.0.0.1:42393 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:53,120 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK] 2024-11-14T09:30:53,121 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:53,121 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK], DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]) is bad. 2024-11-14T09:30:53,121 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741869_1052 2024-11-14T09:30:53,122 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK] 2024-11-14T09:30:53,123 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:53,123 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]) is bad. 2024-11-14T09:30:53,123 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741870_1053 2024-11-14T09:30:53,123 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK] 2024-11-14T09:30:53,124 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:53,124 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]) is bad. 2024-11-14T09:30:53,124 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741871_1054 2024-11-14T09:30:53,125 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK] 2024-11-14T09:30:53,125 WARN [IPC Server handler 1 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:30:53,125 WARN [IPC Server handler 1 on default port 36133 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:30:53,126 WARN [IPC Server handler 1 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:30:53,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741872_1055 (size=6027) 2024-11-14T09:30:53,370 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@279ccdac[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42393, datanodeUuid=8d91c2d0-a1ea-4db7-bada-85ae4e8ce831, infoPort=34607, infoSecurePort=0, ipcPort=45647, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823):Failed to transfer BP-2105580279-172.17.0.2-1731576630823:blk_1073741842_1025 to 127.0.0.1:37063 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:53,370 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@23a35d00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42393, datanodeUuid=8d91c2d0-a1ea-4db7-bada-85ae4e8ce831, infoPort=34607, infoSecurePort=0, ipcPort=45647, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823):Failed to transfer BP-2105580279-172.17.0.2-1731576630823:blk_1073741862_1045 to 127.0.0.1:33547 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:30:53,530 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/45e66f42f05940fb8a2564a8c7886e27 2024-11-14T09:30:53,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/45e66f42f05940fb8a2564a8c7886e27 as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/45e66f42f05940fb8a2564a8c7886e27 2024-11-14T09:30:53,542 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/45e66f42f05940fb8a2564a8c7886e27, entries=1, sequenceid=45, filesize=5.9 K 2024-11-14T09:30:53,543 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 6901bf69dd23f7cb187b9800f3d62146 in 431ms, sequenceid=45, compaction requested=false 2024-11-14T09:30:53,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6901bf69dd23f7cb187b9800f3d62146: 2024-11-14T09:30:53,544 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-14T09:30:53,544 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:30:53,544 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/11c5d17733b24d30bcc63bc432b3bffe because midkey is the same as first or last row 2024-11-14T09:30:53,659 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:53,660 WARN [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]] 2024-11-14T09:30:53,660 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 83f56b55f2af%2C44811%2C1731576631688:(num 1731576651639) roll requested 2024-11-14T09:30:53,660 INFO [regionserver/83f56b55f2af:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44811%2C1731576631688.1731576653660 2024-11-14T09:30:53,663 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:53,663 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]) is bad. 2024-11-14T09:30:53,663 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741873_1056 2024-11-14T09:30:53,663 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK] 2024-11-14T09:30:53,665 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:53,665 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK], DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]) is bad. 2024-11-14T09:30:53,665 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741874_1057 2024-11-14T09:30:53,665 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK] 2024-11-14T09:30:53,666 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:53,667 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK], DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]) is bad. 2024-11-14T09:30:53,667 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741875_1058 2024-11-14T09:30:53,667 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK] 2024-11-14T09:30:53,668 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:53,668 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]) is bad. 2024-11-14T09:30:53,668 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741876_1059 2024-11-14T09:30:53,669 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK] 2024-11-14T09:30:53,670 WARN [IPC Server handler 1 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:30:53,670 WARN [IPC Server handler 1 on default port 36133 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:30:53,670 WARN [IPC Server handler 1 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:30:53,672 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:53,672 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:53,672 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:53,673 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:53,673 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:30:53,673 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576651639 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576653660 2024-11-14T09:30:53,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741857_1040 (size=13591) 2024-11-14T09:30:53,681 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34607:34607)] 2024-11-14T09:30:53,681 DEBUG 
[regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576632110 is not closed yet, will try archiving it next time 2024-11-14T09:30:53,681 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576651639 is not closed yet, will try archiving it next time 2024-11-14T09:30:53,681 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576647620 to hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/oldWALs/83f56b55f2af%2C44811%2C1731576631688.1731576647620 2024-11-14T09:30:53,776 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:54,076 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576632110 is not closed yet, will try archiving it next time 2024-11-14T09:30:54,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44811 {}] regionserver.HRegion(8855): Flush requested on 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:30:54,531 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6901bf69dd23f7cb187b9800f3d62146 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T09:30:54,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/680a64e4da1d4096b3c0dd192e8d66ec is 1079, key is tmprow/info:/1731576654529/Put/seqid=0 2024-11-14T09:30:54,537 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:54,537 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK], DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]) is bad. 2024-11-14T09:30:54,537 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741878_1061 2024-11-14T09:30:54,538 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK] 2024-11-14T09:30:54,539 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:54,539 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK], DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]) is bad. 
2024-11-14T09:30:54,539 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741879_1062 2024-11-14T09:30:54,539 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK] 2024-11-14T09:30:54,541 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:35522 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data8]'}, localName='127.0.0.1:42393', datanodeUuid='8d91c2d0-a1ea-4db7-bada-85ae4e8ce831', xmitsInProgress=0}:Exception transferring block BP-2105580279-172.17.0.2-1731576630823:blk_1073741880_1063 to mirror 127.0.0.1:37737 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:54,541 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37737 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:54,541 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:35522 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-14T09:30:54,541 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK], DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]) is bad. 2024-11-14T09:30:54,542 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741880_1063 2024-11-14T09:30:54,542 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:35522 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:42393:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35522 dst: /127.0.0.1:42393 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:54,542 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK] 2024-11-14T09:30:54,543 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:54,543 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]) is bad. 2024-11-14T09:30:54,543 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741881_1064 2024-11-14T09:30:54,544 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK] 2024-11-14T09:30:54,544 WARN [IPC Server handler 2 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:30:54,544 WARN [IPC Server handler 2 on default port 36133 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:30:54,545 WARN [IPC Server handler 2 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:30:54,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741882_1065 (size=6027) 2024-11-14T09:30:54,701 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:54,948 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/680a64e4da1d4096b3c0dd192e8d66ec 2024-11-14T09:30:54,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/680a64e4da1d4096b3c0dd192e8d66ec as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/680a64e4da1d4096b3c0dd192e8d66ec 2024-11-14T09:30:54,960 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/680a64e4da1d4096b3c0dd192e8d66ec, entries=1, sequenceid=55, filesize=5.9 K 2024-11-14T09:30:54,961 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 6901bf69dd23f7cb187b9800f3d62146 in 431ms, sequenceid=55, compaction requested=true 2024-11-14T09:30:54,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6901bf69dd23f7cb187b9800f3d62146: 2024-11-14T09:30:54,962 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-14T09:30:54,962 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:30:54,962 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/11c5d17733b24d30bcc63bc432b3bffe because midkey is the same as first or last row 2024-11-14T09:30:54,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6901bf69dd23f7cb187b9800f3d62146:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:30:54,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:30:54,962 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:30:54,963 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:30:54,963 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HStore(1541): 6901bf69dd23f7cb187b9800f3d62146/info is initiating minor compaction (all files) 2024-11-14T09:30:54,963 INFO [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
6901bf69dd23f7cb187b9800f3d62146/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. 2024-11-14T09:30:54,963 INFO [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/11c5d17733b24d30bcc63bc432b3bffe, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/45e66f42f05940fb8a2564a8c7886e27, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/680a64e4da1d4096b3c0dd192e8d66ec] into tmpdir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp, totalSize=29.3 K 2024-11-14T09:30:54,964 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] compactions.Compactor(225): Compacting 11c5d17733b24d30bcc63bc432b3bffe, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731576645629 2024-11-14T09:30:54,964 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] compactions.Compactor(225): Compacting 45e66f42f05940fb8a2564a8c7886e27, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731576653111 2024-11-14T09:30:54,965 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] compactions.Compactor(225): Compacting 680a64e4da1d4096b3c0dd192e8d66ec, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731576654529 2024-11-14T09:30:54,981 INFO [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6901bf69dd23f7cb187b9800f3d62146#info#compaction#24 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:30:54,981 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/28845e91a3624ec0a4dba43752cd2684 is 1080, key is row0002/info:/1731576645629/Put/seqid=0 2024-11-14T09:30:54,983 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:54,983 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]) is bad. 2024-11-14T09:30:54,983 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741883_1066 2024-11-14T09:30:54,984 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46553,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK] 2024-11-14T09:30:54,985 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:54,985 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK], DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]) is bad. 2024-11-14T09:30:54,985 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741884_1067 2024-11-14T09:30:54,986 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK] 2024-11-14T09:30:54,987 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:54,987 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK], DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]) is bad. 2024-11-14T09:30:54,987 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741885_1068 2024-11-14T09:30:54,988 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK] 2024-11-14T09:30:54,992 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33547 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:54,992 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:35548 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data8]'}, localName='127.0.0.1:42393', datanodeUuid='8d91c2d0-a1ea-4db7-bada-85ae4e8ce831', xmitsInProgress=0}:Exception transferring block BP-2105580279-172.17.0.2-1731576630823:blk_1073741886_1069 to mirror 127.0.0.1:33547 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:54,992 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK], DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]) is bad. 2024-11-14T09:30:54,992 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741886_1069 2024-11-14T09:30:54,992 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:35548 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:30:54,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:35548 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:42393:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35548 dst: /127.0.0.1:42393 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:30:54,993 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK] 2024-11-14T09:30:54,993 WARN [IPC Server handler 1 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:30:54,993 WARN [IPC Server handler 1 on default port 36133 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:30:54,993 WARN [IPC Server handler 1 on default port 36133 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:30:54,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741887_1070 (size=18097) 2024-11-14T09:30:55,370 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@279ccdac[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42393, datanodeUuid=8d91c2d0-a1ea-4db7-bada-85ae4e8ce831, infoPort=34607, infoSecurePort=0, ipcPort=45647, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823):Failed to transfer BP-2105580279-172.17.0.2-1731576630823:blk_1073741872_1055 to 127.0.0.1:33547 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:30:55,370 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@23a35d00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42393, datanodeUuid=8d91c2d0-a1ea-4db7-bada-85ae4e8ce831, infoPort=34607, infoSecurePort=0, ipcPort=45647, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823):Failed to transfer BP-2105580279-172.17.0.2-1731576630823:blk_1073741867_1050 to 127.0.0.1:33547 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:55,405 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/28845e91a3624ec0a4dba43752cd2684 as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/28845e91a3624ec0a4dba43752cd2684 2024-11-14T09:30:55,413 INFO [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6901bf69dd23f7cb187b9800f3d62146/info of 6901bf69dd23f7cb187b9800f3d62146 into 28845e91a3624ec0a4dba43752cd2684(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T09:30:55,414 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6901bf69dd23f7cb187b9800f3d62146: 2024-11-14T09:30:55,414 INFO [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146., storeName=6901bf69dd23f7cb187b9800f3d62146/info, priority=13, startTime=1731576654962; duration=0sec 2024-11-14T09:30:55,414 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-14T09:30:55,414 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:30:55,414 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/28845e91a3624ec0a4dba43752cd2684 because midkey is the same as first or last row 2024-11-14T09:30:55,414 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-14T09:30:55,414 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:30:55,414 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/28845e91a3624ec0a4dba43752cd2684 because midkey is the same as first or last row 2024-11-14T09:30:55,414 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-14T09:30:55,414 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:30:55,414 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/28845e91a3624ec0a4dba43752cd2684 because midkey is the same as first or last row 2024-11-14T09:30:55,414 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:30:55,414 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6901bf69dd23f7cb187b9800f3d62146:info 2024-11-14T09:30:55,681 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:55,681 WARN [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-14T09:30:55,752 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:30:55,755 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:30:55,756 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:30:55,756 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:30:55,756 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:30:55,757 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@633e3771{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:30:55,757 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72ba7a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:30:55,776 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:30:55,872 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@471de816{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/java.io.tmpdir/jetty-localhost-36829-hadoop-hdfs-3_4_1-tests_jar-_-any-904111118346137978/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:30:55,872 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@fd22f99{HTTP/1.1, (http/1.1)}{localhost:36829} 2024-11-14T09:30:55,872 INFO [Time-limited test {}] server.Server(415): Started @130058ms 2024-11-14T09:30:55,874 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:30:55,981 WARN [Thread-985 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:30:55,989 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x737cee60b00450b with lease ID 0xe55f664660dab1a: from storage DS-820f820f-a0a2-4a8b-914e-94b63dc152d2 node DatanodeRegistration(127.0.0.1:35455, datanodeUuid=340cd0fe-f1d0-4fc4-8a97-207897c75a92, infoPort=38567, infoSecurePort=0, ipcPort=36089, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T09:30:55,989 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x737cee60b00450b with lease ID 0xe55f664660dab1a: from storage DS-63205298-e9c4-4b00-967d-5a4dd529df6d node DatanodeRegistration(127.0.0.1:35455, datanodeUuid=340cd0fe-f1d0-4fc4-8a97-207897c75a92, infoPort=38567, infoSecurePort=0, ipcPort=36089, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:30:56,370 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@23a35d00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42393, datanodeUuid=8d91c2d0-a1ea-4db7-bada-85ae4e8ce831, infoPort=34607, infoSecurePort=0, ipcPort=45647, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823):Failed to transfer BP-2105580279-172.17.0.2-1731576630823:blk_1073741857_1040 to 127.0.0.1:33547 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:30:56,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741882_1065 (size=6027) 2024-11-14T09:30:56,701 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:57,681 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:57,776 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:58,370 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@279ccdac[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42393, datanodeUuid=8d91c2d0-a1ea-4db7-bada-85ae4e8ce831, infoPort=34607, infoSecurePort=0, ipcPort=45647, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823):Failed to transfer BP-2105580279-172.17.0.2-1731576630823:blk_1073741887_1070 to 127.0.0.1:33547 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:30:58,702 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:59,682 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:30:59,777 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:00,702 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:01,614 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T09:31:01,682 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:01,777 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:01,890 ERROR [FSHLog-0-hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData-prefix:83f56b55f2af,40525,1731576631633 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:01,890 WARN [FSHLog-0-hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData-prefix:83f56b55f2af,40525,1731576631633 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:01,890 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 83f56b55f2af%2C40525%2C1731576631633:(num 1731576631798) roll requested 2024-11-14T09:31:01,891 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C40525%2C1731576631633.1731576661891 2024-11-14T09:31:01,894 WARN [Thread-1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:01,894 WARN [Thread-1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK], DatanodeInfoWithStorage[127.0.0.1:35455,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]) is bad. 
2024-11-14T09:31:01,894 WARN [Thread-1006 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741888_1071 2024-11-14T09:31:01,895 WARN [Thread-1006 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK] 2024-11-14T09:31:01,898 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:01,899 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:01,899 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:01,899 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:01,899 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:01,899 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/WALs/83f56b55f2af,40525,1731576631633/83f56b55f2af%2C40525%2C1731576631633.1731576631798 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/WALs/83f56b55f2af,40525,1731576631633/83f56b55f2af%2C40525%2C1731576631633.1731576661891 2024-11-14T09:31:01,899 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:01,900 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:31:01,900 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/WALs/83f56b55f2af,40525,1731576631633/83f56b55f2af%2C40525%2C1731576631633.1731576631798 2024-11-14T09:31:01,900 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34607:34607),(127.0.0.1/127.0.0.1:38567:38567)] 2024-11-14T09:31:01,900 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/WALs/83f56b55f2af,40525,1731576631633/83f56b55f2af%2C40525%2C1731576631633.1731576631798 is not closed yet, will try archiving it next time 2024-11-14T09:31:01,900 WARN [IPC Server handler 2 on default port 36133 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/WALs/83f56b55f2af,40525,1731576631633/83f56b55f2af%2C40525%2C1731576631633.1731576631798 has not been closed. Lease recovery is in progress. RecoveryId = 1073 for block blk_1073741830_1006 2024-11-14T09:31:01,900 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/WALs/83f56b55f2af,40525,1731576631633/83f56b55f2af%2C40525%2C1731576631633.1731576631798 after 0ms 2024-11-14T09:31:02,702 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:03,682 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:04,703 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:05,683 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:05,902 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/WALs/83f56b55f2af,40525,1731576631633/83f56b55f2af%2C40525%2C1731576631633.1731576631798 after 4002ms 2024-11-14T09:31:06,002 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@27a1a0c3 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2105580279-172.17.0.2-1731576630823:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:37737,null,null]) java.net.ConnectException: Call From 83f56b55f2af/172.17.0.2 to localhost:44553 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-14T09:31:06,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741833_1019 (size=455) 2024-11-14T09:31:06,641 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576632110 to hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/oldWALs/83f56b55f2af%2C44811%2C1731576631688.1731576632110 2024-11-14T09:31:06,642 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576651639 to hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/oldWALs/83f56b55f2af%2C44811%2C1731576631688.1731576651639 2024-11-14T09:31:06,703 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:07,683 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:07,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741833_1019 (size=455) 2024-11-14T09:31:08,703 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,195 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44811%2C1731576631688.1731576669194 2024-11-14T09:31:09,198 WARN [Thread-1017 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33547 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
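The repeated "All datanodes [...] are bad. Aborting..." entries above mean the DFS client's DataStreamer could not find a usable replacement datanode while re-establishing the WAL write pipeline, so the stream gives up. As a hedged illustration (not what this test configures), the client-side behaviour is governed by the standard replace-datanode-on-failure settings, which could be tuned before opening the FileSystem; the URI matches the NameNode address in this log, everything else is an example:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PipelinePolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Try to replace failed datanodes during pipeline recovery instead of aborting.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "ALWAYS");
    // If no replacement is available, continue with the remaining live datanodes.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:36133"), conf)) {
      // Any stream opened from this FileSystem inherits the pipeline-recovery policy above.
      fs.create(new Path("/tmp/pipeline-policy-example")).close();
    }
  }
}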
2024-11-14T09:31:09,198 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1558881060_22 at /127.0.0.1:39714 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741890_1074] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data4]'}, localName='127.0.0.1:35455', datanodeUuid='340cd0fe-f1d0-4fc4-8a97-207897c75a92', xmitsInProgress=0}:Exception transferring block BP-2105580279-172.17.0.2-1731576630823:blk_1073741890_1074 to mirror 127.0.0.1:33547 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:09,198 WARN [Thread-1017 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35455,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK], DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]) is bad. 2024-11-14T09:31:09,198 WARN [Thread-1017 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741890_1074 2024-11-14T09:31:09,199 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1558881060_22 at /127.0.0.1:39714 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741890_1074] {}] datanode.BlockReceiver(316): Block 1073741890 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T09:31:09,199 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1558881060_22 at /127.0.0.1:39714 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741890_1074] {}] datanode.DataXceiver(331): 127.0.0.1:35455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39714 dst: /127.0.0.1:35455 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:09,199 WARN [Thread-1017 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK] 2024-11-14T09:31:09,201 WARN [Thread-1017 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37063 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,201 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1558881060_22 at /127.0.0.1:39716 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741891_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data4]'}, localName='127.0.0.1:35455', datanodeUuid='340cd0fe-f1d0-4fc4-8a97-207897c75a92', xmitsInProgress=0}:Exception transferring block BP-2105580279-172.17.0.2-1731576630823:blk_1073741891_1075 to mirror 127.0.0.1:37063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:31:09,201 WARN [Thread-1017 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35455,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK], DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]) is bad. 2024-11-14T09:31:09,201 WARN [Thread-1017 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741891_1075 2024-11-14T09:31:09,201 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1558881060_22 at /127.0.0.1:39716 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741891_1075] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T09:31:09,201 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1558881060_22 at /127.0.0.1:39716 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741891_1075] {}] datanode.DataXceiver(331): 127.0.0.1:35455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39716 dst: /127.0.0.1:35455 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:09,202 WARN [Thread-1017 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK] 2024-11-14T09:31:09,203 WARN [Thread-1017 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
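The "Failed to recover lease, attempt=1 ... after 4002ms" entry earlier in this stretch is RecoverLeaseFSUtils polling the NameNode until the old WAL file is closed and readable again. A minimal sketch of that polling pattern against the public HDFS client API; the class name, timeout, and retry interval are illustrative, and the real utility adds logging and back-off:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoveryExample {
  // Ask the NameNode to recover the lease on a WAL file, polling until it is closed.
  public static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (dfs.recoverLease(wal)) {  // true once the file has been closed by the NameNode
        return true;
      }
      Thread.sleep(1000L);          // retry interval, illustrative only
    }
    return false;
  }
}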
2024-11-14T09:31:09,203 WARN [Thread-1017 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]) is bad. 2024-11-14T09:31:09,203 WARN [Thread-1017 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741892_1076 2024-11-14T09:31:09,203 WARN [Thread-1017 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK] 2024-11-14T09:31:09,208 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:09,208 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:09,208 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:09,208 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:09,208 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:09,209 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576653660 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576669194 2024-11-14T09:31:09,210 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34607:34607),(127.0.0.1/127.0.0.1:38567:38567)] 2024-11-14T09:31:09,210 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576653660 is not closed yet, will try archiving it next time 2024-11-14T09:31:09,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741877_1060 (size=12911) 2024-11-14T09:31:09,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44811 {}] regionserver.HRegion(8855): Flush requested on 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:31:09,215 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6901bf69dd23f7cb187b9800f3d62146 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T09:31:09,220 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/0a7cbfd9828041f6b217674dfa7db081 is 1080, key is row0013/info:/1731576669212/Put/seqid=0 2024-11-14T09:31:09,221 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,222 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK], DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]) is bad. 2024-11-14T09:31:09,222 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741894_1078 2024-11-14T09:31:09,222 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK] 2024-11-14T09:31:09,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741895_1079 (size=8190) 2024-11-14T09:31:09,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741895_1079 (size=8190) 2024-11-14T09:31:09,228 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/0a7cbfd9828041f6b217674dfa7db081 2024-11-14T09:31:09,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/0a7cbfd9828041f6b217674dfa7db081 as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/0a7cbfd9828041f6b217674dfa7db081 2024-11-14T09:31:09,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/0a7cbfd9828041f6b217674dfa7db081, entries=3, sequenceid=66, filesize=8.0 K 2024-11-14T09:31:09,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for 6901bf69dd23f7cb187b9800f3d62146 in 27ms, sequenceid=66, compaction requested=false 2024-11-14T09:31:09,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6901bf69dd23f7cb187b9800f3d62146: 2024-11-14T09:31:09,242 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-14T09:31:09,242 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 
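The flush above is driven by MemStoreFlusher after the region server requested it in HRegion(8855); the same kind of flush can also be triggered from a client. A minimal sketch using the public Admin API, assuming default client configuration; the table name is the one appearing in this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Write out the memstore of every region of the table as new HFiles.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
    }
  }
}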
2024-11-14T09:31:09,242 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/28845e91a3624ec0a4dba43752cd2684 because midkey is the same as first or last row 2024-11-14T09:31:09,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44811 {}] regionserver.HRegion(8855): Flush requested on 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:31:09,436 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6901bf69dd23f7cb187b9800f3d62146 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-14T09:31:09,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/0b13b69a4a61497da0d503bb96eb47ca is 1080, key is row0015/info:/1731576669216/Put/seqid=0 2024-11-14T09:31:09,443 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1080 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,443 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741896_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK], DatanodeInfoWithStorage[127.0.0.1:35455,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]) is bad. 2024-11-14T09:31:09,443 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741896_1080 2024-11-14T09:31:09,444 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK] 2024-11-14T09:31:09,445 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,445 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741897_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]) is bad. 2024-11-14T09:31:09,445 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741897_1081 2024-11-14T09:31:09,445 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK] 2024-11-14T09:31:09,446 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,446 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741898_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]) is bad. 
2024-11-14T09:31:09,446 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741898_1082 2024-11-14T09:31:09,447 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK] 2024-11-14T09:31:09,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741899_1083 (size=14660) 2024-11-14T09:31:09,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741899_1083 (size=14660) 2024-11-14T09:31:09,452 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/0b13b69a4a61497da0d503bb96eb47ca 2024-11-14T09:31:09,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/0b13b69a4a61497da0d503bb96eb47ca as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/0b13b69a4a61497da0d503bb96eb47ca 2024-11-14T09:31:09,463 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/0b13b69a4a61497da0d503bb96eb47ca, entries=9, sequenceid=79, filesize=14.3 K 2024-11-14T09:31:09,464 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10758, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 6901bf69dd23f7cb187b9800f3d62146 in 28ms, sequenceid=79, compaction requested=true 2024-11-14T09:31:09,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6901bf69dd23f7cb187b9800f3d62146: 2024-11-14T09:31:09,464 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K 2024-11-14T09:31:09,464 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:31:09,464 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/28845e91a3624ec0a4dba43752cd2684 because midkey is the same as first or last row 2024-11-14T09:31:09,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6901bf69dd23f7cb187b9800f3d62146:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:31:09,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:31:09,464 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): 
Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:31:09,465 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:31:09,466 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HStore(1541): 6901bf69dd23f7cb187b9800f3d62146/info is initiating minor compaction (all files) 2024-11-14T09:31:09,466 INFO [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6901bf69dd23f7cb187b9800f3d62146/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. 2024-11-14T09:31:09,466 INFO [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/28845e91a3624ec0a4dba43752cd2684, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/0a7cbfd9828041f6b217674dfa7db081, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/0b13b69a4a61497da0d503bb96eb47ca] into tmpdir=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp, totalSize=40.0 K 2024-11-14T09:31:09,466 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] compactions.Compactor(225): Compacting 28845e91a3624ec0a4dba43752cd2684, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731576645629 2024-11-14T09:31:09,467 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0a7cbfd9828041f6b217674dfa7db081, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1731576655542 2024-11-14T09:31:09,467 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0b13b69a4a61497da0d503bb96eb47ca, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731576669216 2024-11-14T09:31:09,478 INFO [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6901bf69dd23f7cb187b9800f3d62146#info#compaction#27 average throughput is 22.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:31:09,479 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/cf58f051837e4039a722148c6dbab64c is 1080, key is row0002/info:/1731576645629/Put/seqid=0 2024-11-14T09:31:09,481 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1084 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,481 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741900_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK], DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK]) is bad. 2024-11-14T09:31:09,481 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741900_1084 2024-11-14T09:31:09,482 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33547,DS-ce8d1ceb-6553-4b42-9334-d7c84d70f99d,DISK] 2024-11-14T09:31:09,483 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1085 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,483 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741901_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK], DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]) is bad. 
2024-11-14T09:31:09,483 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741901_1085 2024-11-14T09:31:09,483 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK] 2024-11-14T09:31:09,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741902_1086 (size=28989) 2024-11-14T09:31:09,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741902_1086 (size=28989) 2024-11-14T09:31:09,495 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/cf58f051837e4039a722148c6dbab64c as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/cf58f051837e4039a722148c6dbab64c 2024-11-14T09:31:09,502 INFO [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6901bf69dd23f7cb187b9800f3d62146/info of 6901bf69dd23f7cb187b9800f3d62146 into cf58f051837e4039a722148c6dbab64c(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:31:09,502 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6901bf69dd23f7cb187b9800f3d62146: 2024-11-14T09:31:09,502 INFO [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146., storeName=6901bf69dd23f7cb187b9800f3d62146/info, priority=13, startTime=1731576669464; duration=0sec 2024-11-14T09:31:09,502 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-14T09:31:09,502 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:31:09,502 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/cf58f051837e4039a722148c6dbab64c because midkey is the same as first or last row 2024-11-14T09:31:09,503 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-14T09:31:09,503 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:31:09,503 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/cf58f051837e4039a722148c6dbab64c because midkey is the same as first or last row 2024-11-14T09:31:09,503 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-14T09:31:09,503 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:31:09,503 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/cf58f051837e4039a722148c6dbab64c because midkey is the same as first or last row 2024-11-14T09:31:09,503 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:31:09,503 DEBUG [RS:0;83f56b55f2af:44811-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6901bf69dd23f7cb187b9800f3d62146:info 2024-11-14T09:31:09,611 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.1731576653660 to hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/oldWALs/83f56b55f2af%2C44811%2C1731576631688.1731576653660 2024-11-14T09:31:09,683 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,684 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-14T09:31:09,840 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T09:31:09,840 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
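"LowReplication-Roller was enabled" above means the FSHLog roller will keep rolling the WAL while the current file sits on fewer replicas than configured. For reference, a roll of a specific region server's WAL can also be requested explicitly; a hedged sketch using the public Admin API, with the server name copied from this log and all other details illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask the region server to close its current WAL file and start a new one.
      admin.rollWALWriter(ServerName.valueOf("83f56b55f2af,44811,1731576631688"));
    }
  }
}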
2024-11-14T09:31:09,840 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:31:09,840 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:31:09,841 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:31:09,841 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
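The call stack just above shows how this shutdown is reached: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which closes the cluster connection and then stops the master and region servers. A skeletal version of that test lifecycle, assuming JUnit 4 as used by the runners in the stack; the class name and region server count are illustrative:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class LogRollingTestSkeleton {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    util.startMiniCluster(2);     // number of region servers, illustrative
  }

  @After
  public void tearDown() throws Exception {
    // Stops HBase plus the backing HDFS and ZooKeeper miniclusters, as in the log above.
    util.shutdownMiniCluster();
  }
}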
2024-11-14T09:31:09,841 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T09:31:09,841 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1650848494, stopped=false 2024-11-14T09:31:09,841 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=83f56b55f2af,40525,1731576631633 2024-11-14T09:31:09,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:31:09,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:31:09,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:09,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:09,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x10115d07bd00002, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:31:09,843 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:31:09,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x10115d07bd00002, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:09,843 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T09:31:09,843 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:31:09,843 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:31:09,843 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '83f56b55f2af,44811,1731576631688' ***** 2024-11-14T09:31:09,843 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T09:31:09,843 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '83f56b55f2af,41383,1731576632641' ***** 2024-11-14T09:31:09,843 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T09:31:09,843 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:31:09,844 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T09:31:09,844 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T09:31:09,844 INFO [RS:0;83f56b55f2af:44811 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T09:31:09,844 INFO [RS:1;83f56b55f2af:41383 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T09:31:09,844 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T09:31:09,844 INFO [RS:0;83f56b55f2af:44811 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T09:31:09,844 INFO [RS:1;83f56b55f2af:41383 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T09:31:09,844 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41383-0x10115d07bd00002, quorum=127.0.0.1:56059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:31:09,844 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.HRegionServer(959): stopping server 83f56b55f2af,41383,1731576632641 2024-11-14T09:31:09,844 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer(3091): Received CLOSE for 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:31:09,844 INFO [RS:1;83f56b55f2af:41383 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:31:09,844 INFO [RS:1;83f56b55f2af:41383 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;83f56b55f2af:41383. 
2024-11-14T09:31:09,844 DEBUG [RS:1;83f56b55f2af:41383 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:31:09,844 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T09:31:09,844 DEBUG [RS:1;83f56b55f2af:41383 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:31:09,844 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:31:09,845 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.HRegionServer(976): stopping server 83f56b55f2af,41383,1731576632641; all regions closed. 2024-11-14T09:31:09,844 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer(959): stopping server 83f56b55f2af,44811,1731576631688 2024-11-14T09:31:09,845 INFO [RS:0;83f56b55f2af:44811 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:31:09,845 INFO [RS:0;83f56b55f2af:44811 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;83f56b55f2af:44811. 
2024-11-14T09:31:09,845 DEBUG [RS:0;83f56b55f2af:44811 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:31:09,845 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6901bf69dd23f7cb187b9800f3d62146, disabling compactions & flushes 2024-11-14T09:31:09,845 DEBUG [RS:0;83f56b55f2af:44811 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:31:09,845 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. 2024-11-14T09:31:09,845 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T09:31:09,845 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T09:31:09,845 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. 2024-11-14T09:31:09,845 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T09:31:09,845 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. after waiting 0 ms 2024-11-14T09:31:09,845 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:09,845 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T09:31:09,845 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. 
2024-11-14T09:31:09,845 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:09,845 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 6901bf69dd23f7cb187b9800f3d62146 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T09:31:09,845 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:09,845 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T09:31:09,846 DEBUG [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 6901bf69dd23f7cb187b9800f3d62146=TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.} 2024-11-14T09:31:09,846 DEBUG [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6901bf69dd23f7cb187b9800f3d62146 2024-11-14T09:31:09,846 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:09,846 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:31:09,846 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:09,846 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:31:09,846 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:31:09,846 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:31:09,846 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:31:09,846 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-14T09:31:09,846 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,846 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,846 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 2024-11-14T09:31:09,846 ERROR [FSHLog-0-hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60-prefix:83f56b55f2af,44811,1731576631688.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,846 WARN [FSHLog-0-hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60-prefix:83f56b55f2af,44811,1731576631688.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,847 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 83f56b55f2af%2C44811%2C1731576631688.meta:.meta(num 1731576632529) roll requested 2024-11-14T09:31:09,847 INFO [regionserver/83f56b55f2af:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44811%2C1731576631688.meta.1731576669847.meta 2024-11-14T09:31:09,847 WARN [IPC Server handler 3 on default port 36133 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 has not been closed. Lease recovery is in progress. 
RecoveryId = 1087 for block blk_1073741837_1013 2024-11-14T09:31:09,847 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 after 1ms 2024-11-14T09:31:09,851 WARN [Thread-1046 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1088 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37737 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,851 WARN [Thread-1046 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741903_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35455,DS-820f820f-a0a2-4a8b-914e-94b63dc152d2,DISK], DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]) is bad. 2024-11-14T09:31:09,851 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:39782 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741903_1088] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data4]'}, localName='127.0.0.1:35455', datanodeUuid='340cd0fe-f1d0-4fc4-8a97-207897c75a92', xmitsInProgress=0}:Exception transferring block BP-2105580279-172.17.0.2-1731576630823:blk_1073741903_1088 to mirror 127.0.0.1:37737 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:31:09,851 WARN [Thread-1046 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741903_1088 2024-11-14T09:31:09,851 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:39782 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741903_1088] {}] datanode.BlockReceiver(316): Block 1073741903 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T09:31:09,851 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/63716f69b2cf4d20a6dfa753104ac885 is 1079, key is tmprow/info:/1731576669638/Put/seqid=0 2024-11-14T09:31:09,851 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:39782 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741903_1088] {}] datanode.DataXceiver(331): 127.0.0.1:35455:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39782 dst: /127.0.0.1:35455 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:09,851 WARN [Thread-1046 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK] 2024-11-14T09:31:09,853 WARN [Thread-1046 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1089 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:31:09,853 WARN [Thread-1046 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741904_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK], DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]) is bad. 2024-11-14T09:31:09,853 WARN [Thread-1046 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741904_1089 2024-11-14T09:31:09,853 WARN [Thread-1046 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK] 2024-11-14T09:31:09,854 WARN [Thread-1047 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741905_1090 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37063 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,854 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:41024 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741905_1090] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data8]'}, localName='127.0.0.1:42393', datanodeUuid='8d91c2d0-a1ea-4db7-bada-85ae4e8ce831', xmitsInProgress=0}:Exception transferring block BP-2105580279-172.17.0.2-1731576630823:blk_1073741905_1090 to mirror 127.0.0.1:37063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:31:09,854 WARN [Thread-1047 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2105580279-172.17.0.2-1731576630823:blk_1073741905_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42393,DS-b1363d96-78de-4629-8c46-e5cd4cbb9e02,DISK], DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK]) is bad. 2024-11-14T09:31:09,854 WARN [Thread-1047 {}] hdfs.DataStreamer(1850): Abandoning BP-2105580279-172.17.0.2-1731576630823:blk_1073741905_1090 2024-11-14T09:31:09,854 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:41024 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741905_1090] {}] datanode.BlockReceiver(316): Block 1073741905 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:31:09,854 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1006583690_22 at /127.0.0.1:41024 [Receiving block BP-2105580279-172.17.0.2-1731576630823:blk_1073741905_1090] {}] datanode.DataXceiver(331): 127.0.0.1:42393:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41024 dst: /127.0.0.1:42393 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:09,855 WARN [Thread-1047 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37063,DS-c2603025-b29b-4678-b265-b194df3ecbcd,DISK] 2024-11-14T09:31:09,861 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:09,861 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:09,862 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:09,862 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:09,862 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:09,862 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576669847.meta 2024-11-14T09:31:09,862 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,863 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37737,DS-14b230e2-bc33-41e8-94cd-bedfa6acd39f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:09,863 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta 2024-11-14T09:31:09,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741907_1092 (size=6027) 2024-11-14T09:31:09,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741907_1092 (size=6027) 2024-11-14T09:31:09,863 WARN [IPC Server handler 0 on default port 36133 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1093 for block blk_1073741834_1010 2024-11-14T09:31:09,864 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta after 1ms 2024-11-14T09:31:09,864 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=84 (bloomFilter=true), to=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/63716f69b2cf4d20a6dfa753104ac885 2024-11-14T09:31:09,870 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/.tmp/info/63716f69b2cf4d20a6dfa753104ac885 as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/63716f69b2cf4d20a6dfa753104ac885 2024-11-14T09:31:09,870 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38567:38567),(127.0.0.1/127.0.0.1:34607:34607)] 2024-11-14T09:31:09,870 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta is not closed yet, will try archiving it next time 2024-11-14T09:31:09,875 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/63716f69b2cf4d20a6dfa753104ac885, entries=1, sequenceid=84, filesize=5.9 K 2024-11-14T09:31:09,877 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1075, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 6901bf69dd23f7cb187b9800f3d62146 in 32ms, sequenceid=84, compaction requested=false 2024-11-14T09:31:09,877 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/e030a042cda1436ebb28492b5588b32e, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/3cbf54693eb8475d883341abf8e9d075, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/11c5d17733b24d30bcc63bc432b3bffe, 
hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/fc0dabbfb9174dff89a6676ee66aa0a2, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/45e66f42f05940fb8a2564a8c7886e27, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/28845e91a3624ec0a4dba43752cd2684, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/680a64e4da1d4096b3c0dd192e8d66ec, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/0a7cbfd9828041f6b217674dfa7db081, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/0b13b69a4a61497da0d503bb96eb47ca] to archive 2024-11-14T09:31:09,878 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T09:31:09,880 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/e030a042cda1436ebb28492b5588b32e to hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/e030a042cda1436ebb28492b5588b32e 2024-11-14T09:31:09,881 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/3cbf54693eb8475d883341abf8e9d075 to hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/3cbf54693eb8475d883341abf8e9d075 2024-11-14T09:31:09,882 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/11c5d17733b24d30bcc63bc432b3bffe to hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/11c5d17733b24d30bcc63bc432b3bffe 2024-11-14T09:31:09,884 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/fc0dabbfb9174dff89a6676ee66aa0a2 to hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/fc0dabbfb9174dff89a6676ee66aa0a2 2024-11-14T09:31:09,885 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/45e66f42f05940fb8a2564a8c7886e27 to hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/45e66f42f05940fb8a2564a8c7886e27 2024-11-14T09:31:09,886 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/28845e91a3624ec0a4dba43752cd2684 to hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/28845e91a3624ec0a4dba43752cd2684 2024-11-14T09:31:09,887 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/680a64e4da1d4096b3c0dd192e8d66ec to hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/680a64e4da1d4096b3c0dd192e8d66ec 2024-11-14T09:31:09,888 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/0a7cbfd9828041f6b217674dfa7db081 to hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/0a7cbfd9828041f6b217674dfa7db081 2024-11-14T09:31:09,890 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/0b13b69a4a61497da0d503bb96eb47ca to 
hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/info/0b13b69a4a61497da0d503bb96eb47ca 2024-11-14T09:31:09,890 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=83f56b55f2af:40525 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] 
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-14T09:31:09,890 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [e030a042cda1436ebb28492b5588b32e=10347, 3cbf54693eb8475d883341abf8e9d075=12506, 11c5d17733b24d30bcc63bc432b3bffe=17994, fc0dabbfb9174dff89a6676ee66aa0a2=6027, 45e66f42f05940fb8a2564a8c7886e27=6027, 28845e91a3624ec0a4dba43752cd2684=18097, 680a64e4da1d4096b3c0dd192e8d66ec=6027, 0a7cbfd9828041f6b217674dfa7db081=8190, 0b13b69a4a61497da0d503bb96eb47ca=14660] 2024-11-14T09:31:09,891 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/.tmp/info/b3a8b3b804284f5cad2036cc95b210d8 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146./info:regioninfo/1731576633092/Put/seqid=0 2024-11-14T09:31:09,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741908_1094 (size=7089) 2024-11-14T09:31:09,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741908_1094 (size=7089) 2024-11-14T09:31:09,896 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/.tmp/info/b3a8b3b804284f5cad2036cc95b210d8 2024-11-14T09:31:09,902 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6901bf69dd23f7cb187b9800f3d62146/recovered.edits/87.seqid, newMaxSeqId=87, maxSeqId=1 2024-11-14T09:31:09,903 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. 
2024-11-14T09:31:09,903 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6901bf69dd23f7cb187b9800f3d62146: Waiting for close lock at 1731576669845Running coprocessor pre-close hooks at 1731576669845Disabling compacts and flushes for region at 1731576669845Disabling writes for close at 1731576669845Obtaining lock to block concurrent updates at 1731576669845Preparing flush snapshotting stores in 6901bf69dd23f7cb187b9800f3d62146 at 1731576669845Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146., syncing WAL and waiting on mvcc, flushsize=dataSize=1075, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731576669846 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. at 1731576669847 (+1 ms)Flushing 6901bf69dd23f7cb187b9800f3d62146/info: creating writer at 1731576669847Flushing 6901bf69dd23f7cb187b9800f3d62146/info: appending metadata at 1731576669850 (+3 ms)Flushing 6901bf69dd23f7cb187b9800f3d62146/info: closing flushed file at 1731576669850Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56e52983: reopening flushed file at 1731576669869 (+19 ms)Finished flush of dataSize ~1.05 KB/1075, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 6901bf69dd23f7cb187b9800f3d62146 in 32ms, sequenceid=84, compaction requested=false at 1731576669877 (+8 ms)Writing region close event to WAL at 1731576669891 (+14 ms)Running coprocessor post-close hooks at 1731576669903 (+12 ms)Closed at 1731576669903 2024-11-14T09:31:09,903 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731576632731.6901bf69dd23f7cb187b9800f3d62146. 
2024-11-14T09:31:09,924 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/.tmp/ns/7d6013161e1d48ad93b679d45e0594b2 is 43, key is default/ns:d/1731576632581/Put/seqid=0 2024-11-14T09:31:09,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741909_1095 (size=5153) 2024-11-14T09:31:09,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741909_1095 (size=5153) 2024-11-14T09:31:09,930 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/.tmp/ns/7d6013161e1d48ad93b679d45e0594b2 2024-11-14T09:31:09,957 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/.tmp/table/73651e2849164dfe881270413c7d940e is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731576633103/Put/seqid=0 2024-11-14T09:31:09,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741910_1096 (size=5424) 2024-11-14T09:31:09,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741910_1096 (size=5424) 2024-11-14T09:31:09,963 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/.tmp/table/73651e2849164dfe881270413c7d940e 2024-11-14T09:31:09,969 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/.tmp/info/b3a8b3b804284f5cad2036cc95b210d8 as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/info/b3a8b3b804284f5cad2036cc95b210d8 2024-11-14T09:31:09,975 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/info/b3a8b3b804284f5cad2036cc95b210d8, entries=10, sequenceid=11, filesize=6.9 K 2024-11-14T09:31:09,976 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/.tmp/ns/7d6013161e1d48ad93b679d45e0594b2 as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/ns/7d6013161e1d48ad93b679d45e0594b2 2024-11-14T09:31:09,978 INFO [regionserver/83f56b55f2af:0.leaseChecker {}] 
regionserver.LeaseManager(133): Closed leases 2024-11-14T09:31:09,981 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/ns/7d6013161e1d48ad93b679d45e0594b2, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T09:31:09,982 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/.tmp/table/73651e2849164dfe881270413c7d940e as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/table/73651e2849164dfe881270413c7d940e 2024-11-14T09:31:09,988 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/table/73651e2849164dfe881270413c7d940e, entries=2, sequenceid=11, filesize=5.3 K 2024-11-14T09:31:09,989 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 143ms, sequenceid=11, compaction requested=false 2024-11-14T09:31:09,994 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T09:31:09,995 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:31:09,995 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:31:09,995 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731576669846Running coprocessor pre-close hooks at 1731576669846Disabling compacts and flushes for region at 1731576669846Disabling writes for close at 1731576669846Obtaining lock to block concurrent updates at 1731576669846Preparing flush snapshotting stores in 1588230740 at 1731576669846Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731576669846Flushing stores of hbase:meta,,1.1588230740 at 1731576669871 (+25 ms)Flushing 1588230740/info: creating writer at 1731576669871Flushing 1588230740/info: appending metadata at 1731576669890 (+19 ms)Flushing 1588230740/info: closing flushed file at 1731576669890Flushing 1588230740/ns: creating writer at 1731576669902 (+12 ms)Flushing 1588230740/ns: appending metadata at 1731576669924 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1731576669924Flushing 1588230740/table: creating writer at 1731576669936 (+12 ms)Flushing 1588230740/table: appending metadata at 1731576669956 (+20 ms)Flushing 1588230740/table: closing flushed file at 1731576669956Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1255aa8c: reopening flushed file at 1731576669968 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33de5181: reopening flushed file at 1731576669975 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@64857857: reopening flushed file at 1731576669981 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 143ms, sequenceid=11, compaction requested=false at 1731576669989 (+8 ms)Writing region close event to WAL at 1731576669991 (+2 ms)Running coprocessor post-close hooks at 1731576669994 (+3 ms)Closed at 1731576669995 (+1 ms) 2024-11-14T09:31:09,995 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T09:31:10,002 INFO [regionserver/83f56b55f2af:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T09:31:10,002 INFO [regionserver/83f56b55f2af:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T09:31:10,046 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer(976): stopping server 83f56b55f2af,44811,1731576631688; all regions closed. 2024-11-14T09:31:10,046 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:10,046 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:10,047 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:10,047 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:10,047 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:10,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741906_1091 (size=825) 2024-11-14T09:31:10,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741906_1091 (size=825) 2024-11-14T09:31:10,371 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@279ccdac[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42393, datanodeUuid=8d91c2d0-a1ea-4db7-bada-85ae4e8ce831, infoPort=34607, infoSecurePort=0, ipcPort=45647, storageInfo=lv=-57;cid=testClusterID;nsid=1875755621;c=1731576630823):Failed to transfer BP-2105580279-172.17.0.2-1731576630823:blk_1073741877_1060 to 127.0.0.1:37063 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:31:10,700 INFO [regionserver/83f56b55f2af:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:31:10,702 INFO [regionserver/83f56b55f2af:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T09:31:10,702 INFO [regionserver/83f56b55f2af:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T09:31:11,793 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T09:31:11,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:31:11,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T09:31:13,504 INFO [master/83f56b55f2af:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T09:31:13,504 INFO [master/83f56b55f2af:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-14T09:31:13,848 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 after 4002ms 2024-11-14T09:31:13,864 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta after 4001ms 2024-11-14T09:31:13,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741835_1011 (size=393) 2024-11-14T09:31:13,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:31:14,846 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-14T09:31:14,849 DEBUG [RS:1;83f56b55f2af:41383 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/oldWALs 2024-11-14T09:31:14,849 INFO [RS:1;83f56b55f2af:41383 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 83f56b55f2af%2C41383%2C1731576632641:(num 1731576632831) 2024-11-14T09:31:14,849 DEBUG [RS:1;83f56b55f2af:41383 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:31:14,849 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:31:14,849 INFO [RS:1;83f56b55f2af:41383 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:31:14,849 INFO [RS:1;83f56b55f2af:41383 {}] hbase.ChoreService(370): Chore service for: regionserver/83f56b55f2af:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, 
period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T09:31:14,849 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T09:31:14,849 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T09:31:14,849 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T09:31:14,849 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:31:14,849 INFO [RS:1;83f56b55f2af:41383 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:31:14,849 INFO [RS:1;83f56b55f2af:41383 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41383 2024-11-14T09:31:14,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:31:14,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x10115d07bd00002, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/83f56b55f2af,41383,1731576632641 2024-11-14T09:31:14,852 INFO [RS:1;83f56b55f2af:41383 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:31:14,854 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [83f56b55f2af,41383,1731576632641] 2024-11-14T09:31:14,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:14,856 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/83f56b55f2af,41383,1731576632641 already deleted, retry=false 2024-11-14T09:31:14,856 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 83f56b55f2af,41383,1731576632641 expired; onlineServers=1 2024-11-14T09:31:14,904 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:14,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:14,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:14,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:14,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:14,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:14,928 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:14,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:14,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x10115d07bd00002, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:31:14,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41383-0x10115d07bd00002, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:31:14,954 INFO [RS:1;83f56b55f2af:41383 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:31:14,955 INFO [RS:1;83f56b55f2af:41383 {}] regionserver.HRegionServer(1031): Exiting; stopping=83f56b55f2af,41383,1731576632641; zookeeper connection closed. 2024-11-14T09:31:14,955 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@42a60835 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@42a60835 2024-11-14T09:31:14,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:31:14,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:31:15,047 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-14T09:31:15,051 DEBUG [RS:0;83f56b55f2af:44811 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/oldWALs 2024-11-14T09:31:15,051 INFO [RS:0;83f56b55f2af:44811 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 83f56b55f2af%2C44811%2C1731576631688.meta:.meta(num 1731576669847) 2024-11-14T09:31:15,051 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:15,051 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:15,051 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:15,051 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:15,052 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:15,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741893_1077 (size=18156) 2024-11-14T09:31:15,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741893_1077 (size=18156) 2024-11-14T09:31:15,056 DEBUG [RS:0;83f56b55f2af:44811 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/oldWALs 2024-11-14T09:31:15,056 INFO [RS:0;83f56b55f2af:44811 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 83f56b55f2af%2C44811%2C1731576631688:(num 1731576669194) 2024-11-14T09:31:15,056 DEBUG [RS:0;83f56b55f2af:44811 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:31:15,056 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.LeaseManager(133): Closed leases 
2024-11-14T09:31:15,056 INFO [RS:0;83f56b55f2af:44811 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:31:15,056 INFO [RS:0;83f56b55f2af:44811 {}] hbase.ChoreService(370): Chore service for: regionserver/83f56b55f2af:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T09:31:15,056 INFO [RS:0;83f56b55f2af:44811 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:31:15,056 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:31:15,056 INFO [RS:0;83f56b55f2af:44811 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44811 2024-11-14T09:31:15,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/83f56b55f2af,44811,1731576631688 2024-11-14T09:31:15,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:31:15,058 INFO [RS:0;83f56b55f2af:44811 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:31:15,060 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [83f56b55f2af,44811,1731576631688] 2024-11-14T09:31:15,062 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/83f56b55f2af,44811,1731576631688 already deleted, retry=false 2024-11-14T09:31:15,062 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 83f56b55f2af,44811,1731576631688 expired; onlineServers=0 2024-11-14T09:31:15,062 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '83f56b55f2af,40525,1731576631633' ***** 2024-11-14T09:31:15,062 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T09:31:15,062 INFO [M:0;83f56b55f2af:40525 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:31:15,062 INFO [M:0;83f56b55f2af:40525 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:31:15,063 DEBUG [M:0;83f56b55f2af:40525 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T09:31:15,063 DEBUG [M:0;83f56b55f2af:40525 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T09:31:15,063 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T09:31:15,063 DEBUG [master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576631890 {}] cleaner.HFileCleaner(306): Exit Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576631890,5,FailOnTimeoutGroup] 2024-11-14T09:31:15,063 DEBUG [master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576631890 {}] cleaner.HFileCleaner(306): Exit Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576631890,5,FailOnTimeoutGroup] 2024-11-14T09:31:15,063 INFO [M:0;83f56b55f2af:40525 {}] hbase.ChoreService(370): Chore service for: master/83f56b55f2af:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T09:31:15,063 INFO [M:0;83f56b55f2af:40525 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:31:15,063 DEBUG [M:0;83f56b55f2af:40525 {}] master.HMaster(1795): Stopping service threads 2024-11-14T09:31:15,063 INFO [M:0;83f56b55f2af:40525 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T09:31:15,063 INFO [M:0;83f56b55f2af:40525 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:31:15,063 INFO [M:0;83f56b55f2af:40525 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T09:31:15,064 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T09:31:15,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T09:31:15,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:15,065 DEBUG [M:0;83f56b55f2af:40525 {}] zookeeper.ZKUtil(347): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T09:31:15,065 WARN [M:0;83f56b55f2af:40525 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T09:31:15,065 INFO [M:0;83f56b55f2af:40525 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/.lastflushedseqids 2024-11-14T09:31:15,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741911_1097 (size=130) 2024-11-14T09:31:15,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741911_1097 (size=130) 2024-11-14T09:31:15,071 INFO [M:0;83f56b55f2af:40525 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T09:31:15,071 INFO [M:0;83f56b55f2af:40525 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T09:31:15,071 DEBUG [M:0;83f56b55f2af:40525 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:31:15,071 INFO [M:0;83f56b55f2af:40525 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:31:15,072 DEBUG [M:0;83f56b55f2af:40525 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:31:15,072 DEBUG [M:0;83f56b55f2af:40525 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:31:15,072 DEBUG [M:0;83f56b55f2af:40525 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:31:15,072 INFO [M:0;83f56b55f2af:40525 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-14T09:31:15,088 DEBUG [M:0;83f56b55f2af:40525 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/582604fcafa64e8b8836d86067c58231 is 82, key is hbase:meta,,1/info:regioninfo/1731576632555/Put/seqid=0 2024-11-14T09:31:15,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741912_1098 (size=5672) 2024-11-14T09:31:15,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741912_1098 (size=5672) 2024-11-14T09:31:15,093 INFO [M:0;83f56b55f2af:40525 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/582604fcafa64e8b8836d86067c58231 2024-11-14T09:31:15,112 DEBUG [M:0;83f56b55f2af:40525 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c0946796e0f947fab74d5300608a222a is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731576633107/Put/seqid=0 2024-11-14T09:31:15,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741913_1099 (size=6256) 2024-11-14T09:31:15,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741913_1099 (size=6256) 2024-11-14T09:31:15,117 INFO [M:0;83f56b55f2af:40525 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c0946796e0f947fab74d5300608a222a 2024-11-14T09:31:15,122 INFO [M:0;83f56b55f2af:40525 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c0946796e0f947fab74d5300608a222a 2024-11-14T09:31:15,137 DEBUG [M:0;83f56b55f2af:40525 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/026ee9a355de403681891ad8a1b8f6e9 is 69, key is 83f56b55f2af,41383,1731576632641/rs:state/1731576632683/Put/seqid=0 2024-11-14T09:31:15,141 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741914_1100 (size=5224) 2024-11-14T09:31:15,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741914_1100 (size=5224) 2024-11-14T09:31:15,142 INFO [M:0;83f56b55f2af:40525 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/026ee9a355de403681891ad8a1b8f6e9 2024-11-14T09:31:15,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:31:15,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44811-0x10115d07bd00001, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:31:15,160 INFO [RS:0;83f56b55f2af:44811 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:31:15,160 INFO [RS:0;83f56b55f2af:44811 {}] regionserver.HRegionServer(1031): Exiting; stopping=83f56b55f2af,44811,1731576631688; zookeeper connection closed. 2024-11-14T09:31:15,161 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3e0ca73d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3e0ca73d 2024-11-14T09:31:15,161 DEBUG [M:0;83f56b55f2af:40525 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/467b399f8945409683a98ff98148a942 is 52, key is load_balancer_on/state:d/1731576632624/Put/seqid=0 2024-11-14T09:31:15,161 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-14T09:31:15,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741915_1101 (size=5056) 2024-11-14T09:31:15,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741915_1101 (size=5056) 2024-11-14T09:31:15,166 INFO [M:0;83f56b55f2af:40525 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/467b399f8945409683a98ff98148a942 2024-11-14T09:31:15,171 DEBUG [M:0;83f56b55f2af:40525 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/582604fcafa64e8b8836d86067c58231 as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/582604fcafa64e8b8836d86067c58231 2024-11-14T09:31:15,176 INFO [M:0;83f56b55f2af:40525 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/582604fcafa64e8b8836d86067c58231, entries=8, sequenceid=60, filesize=5.5 K 2024-11-14T09:31:15,177 DEBUG [M:0;83f56b55f2af:40525 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c0946796e0f947fab74d5300608a222a as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c0946796e0f947fab74d5300608a222a 2024-11-14T09:31:15,181 INFO [M:0;83f56b55f2af:40525 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c0946796e0f947fab74d5300608a222a 2024-11-14T09:31:15,181 INFO [M:0;83f56b55f2af:40525 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c0946796e0f947fab74d5300608a222a, entries=6, sequenceid=60, filesize=6.1 K 2024-11-14T09:31:15,182 DEBUG [M:0;83f56b55f2af:40525 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/026ee9a355de403681891ad8a1b8f6e9 as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/026ee9a355de403681891ad8a1b8f6e9 2024-11-14T09:31:15,187 INFO [M:0;83f56b55f2af:40525 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/026ee9a355de403681891ad8a1b8f6e9, entries=2, sequenceid=60, filesize=5.1 K 2024-11-14T09:31:15,188 DEBUG [M:0;83f56b55f2af:40525 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/467b399f8945409683a98ff98148a942 as hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/467b399f8945409683a98ff98148a942 2024-11-14T09:31:15,192 INFO [M:0;83f56b55f2af:40525 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/467b399f8945409683a98ff98148a942, entries=1, sequenceid=60, filesize=4.9 K 2024-11-14T09:31:15,194 INFO [M:0;83f56b55f2af:40525 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=60, compaction requested=false 2024-11-14T09:31:15,195 INFO [M:0;83f56b55f2af:40525 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T09:31:15,195 DEBUG [M:0;83f56b55f2af:40525 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731576675071Disabling compacts and flushes for region at 1731576675071Disabling writes for close at 1731576675072 (+1 ms)Obtaining lock to block concurrent updates at 1731576675072Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731576675072Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1731576675072Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731576675073 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731576675073Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731576675087 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731576675087Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731576675098 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731576675111 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731576675111Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731576675122 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731576675136 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731576675136Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731576675146 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731576675160 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731576675160Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e62c918: reopening flushed file at 1731576675171 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38c9c3df: reopening flushed file at 1731576675176 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c8183b8: reopening flushed file at 1731576675181 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38773fcc: reopening flushed file at 1731576675187 (+6 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 121ms, sequenceid=60, compaction requested=false at 1731576675194 (+7 ms)Writing region close event to WAL at 1731576675195 (+1 ms)Closed at 1731576675195 2024-11-14T09:31:15,196 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:15,196 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:15,196 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:15,196 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:15,196 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:15,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35455 is added to blk_1073741889_1072 (size=1045) 2024-11-14T09:31:15,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741889_1072 (size=1045) 2024-11-14T09:31:15,431 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:31:15,444 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:15,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:15,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:15,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:15,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:15,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:15,449 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:15,451 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:15,855 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:15,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:16,005 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1584e2e5 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2105580279-172.17.0.2-1731576630823:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:37737,null,null]) java.net.ConnectException: Call From 83f56b55f2af/172.17.0.2 to localhost:44553 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-14T09:31:16,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:16,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:16,909 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/WALs/83f56b55f2af,40525,1731576631633/83f56b55f2af%2C40525%2C1731576631633.1731576631798 to hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/oldWALs/83f56b55f2af%2C40525%2C1731576631633.1731576631798 2024-11-14T09:31:16,912 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/MasterData/oldWALs/83f56b55f2af%2C40525%2C1731576631633.1731576631798 to hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/oldWALs/83f56b55f2af%2C40525%2C1731576631633.1731576631798$masterlocalwal$ 2024-11-14T09:31:16,912 INFO [M:0;83f56b55f2af:40525 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T09:31:16,912 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:31:16,912 INFO [M:0;83f56b55f2af:40525 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40525 2024-11-14T09:31:16,912 INFO [M:0;83f56b55f2af:40525 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:31:16,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741836_1012 (size=76) 2024-11-14T09:31:16,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42393 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:31:17,014 INFO [M:0;83f56b55f2af:40525 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:31:17,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:31:17,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40525-0x10115d07bd00000, quorum=127.0.0.1:56059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:31:17,017 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@471de816{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:31:17,017 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@fd22f99{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:31:17,017 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:31:17,017 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@72ba7a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:31:17,017 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@633e3771{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.log.dir/,STOPPED} 2024-11-14T09:31:17,018 WARN [BP-2105580279-172.17.0.2-1731576630823 heartbeating to localhost/127.0.0.1:36133 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:31:17,018 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T09:31:17,018 WARN [BP-2105580279-172.17.0.2-1731576630823 heartbeating to localhost/127.0.0.1:36133 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2105580279-172.17.0.2-1731576630823 (Datanode Uuid 340cd0fe-f1d0-4fc4-8a97-207897c75a92) service to localhost/127.0.0.1:36133 2024-11-14T09:31:17,019 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:31:17,018 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6d035d57 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2105580279-172.17.0.2-1731576630823:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:37737,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:44553 , LocalHost:localPort 83f56b55f2af/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-14T09:31:17,019 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6d035d57 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2105580279-172.17.0.2-1731576630823:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:35455,null,null]) java.io.IOException: No block pool offer service for bpid=BP-2105580279-172.17.0.2-1731576630823 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:31:17,019 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6d035d57 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-2105580279-172.17.0.2-1731576630823:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:37737,null,null], DatanodeInfoWithStorage[127.0.0.1:35455,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-2105580279-172.17.0.2-1731576630823:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:37737,null,null], DatanodeInfoWithStorage[127.0.0.1:35455,null,null]] 2024-11-14T09:31:17,019 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data3/current/BP-2105580279-172.17.0.2-1731576630823 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:31:17,019 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6d035d57 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2105580279-172.17.0.2-1731576630823:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:35455,null,null]) java.io.IOException: No block pool offer service for bpid=BP-2105580279-172.17.0.2-1731576630823 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:17,019 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6d035d57 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2105580279-172.17.0.2-1731576630823:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:37737,null,null]) java.io.IOException: No block pool offer service for bpid=BP-2105580279-172.17.0.2-1731576630823 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:31:17,019 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@6d035d57 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-2105580279-172.17.0.2-1731576630823:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:35455,null,null], DatanodeInfoWithStorage[127.0.0.1:37737,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-2105580279-172.17.0.2-1731576630823:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:35455,null,null], DatanodeInfoWithStorage[127.0.0.1:37737,null,null]] 2024-11-14T09:31:17,019 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data4/current/BP-2105580279-172.17.0.2-1731576630823 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:31:17,020 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:31:17,022 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1d6f136d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:31:17,022 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d0b72b6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:31:17,022 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:31:17,022 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c1f678{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:31:17,022 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@522c0677{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.log.dir/,STOPPED} 2024-11-14T09:31:17,023 WARN [BP-2105580279-172.17.0.2-1731576630823 heartbeating to localhost/127.0.0.1:36133 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:31:17,023 WARN [BP-2105580279-172.17.0.2-1731576630823 heartbeating to localhost/127.0.0.1:36133 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2105580279-172.17.0.2-1731576630823 (Datanode Uuid 8d91c2d0-a1ea-4db7-bada-85ae4e8ce831) service to localhost/127.0.0.1:36133 2024-11-14T09:31:17,023 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:31:17,024 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:31:17,024 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data7/current/BP-2105580279-172.17.0.2-1731576630823 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:31:17,024 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/cluster_81ef3b20-c9a7-5a06-86c1-55a43558ceb4/data/data8/current/BP-2105580279-172.17.0.2-1731576630823 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:31:17,024 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:31:17,030 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3287cde6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:31:17,031 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@8b1f0fc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:31:17,031 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:31:17,031 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3789f604{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:31:17,031 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@320b7eeb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.log.dir/,STOPPED} 2024-11-14T09:31:17,039 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T09:31:17,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T09:31:17,077 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 78) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$897/0x00007f4378bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36133 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$897/0x00007f4378bf4db8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36133 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:36133 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:36133 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:43643 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:36133 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:36133 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:36133 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43643 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:36133 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36133 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36133 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36133 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=170 (was 216), ProcessCount=11 (was 11), AvailableMemoryMB=6682 (was 6793) 2024-11-14T09:31:17,085 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=170, ProcessCount=11, AvailableMemoryMB=6682 2024-11-14T09:31:17,085 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T09:31:17,085 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.log.dir so I do NOT create it in target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88 2024-11-14T09:31:17,085 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ffe8a70d-1c55-147c-4cf7-f32b5e8ed0b0/hadoop.tmp.dir so I do NOT create it in target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88 2024-11-14T09:31:17,085 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208, deleteOnExit=true 2024-11-14T09:31:17,085 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T09:31:17,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/test.cache.data in system properties and HBase conf 2024-11-14T09:31:17,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T09:31:17,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.log.dir in system properties and HBase conf 2024-11-14T09:31:17,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T09:31:17,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T09:31:17,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T09:31:17,086 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-14T09:31:17,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:31:17,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:31:17,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T09:31:17,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:31:17,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T09:31:17,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T09:31:17,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:31:17,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:31:17,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T09:31:17,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/nfs.dump.dir in system properties and HBase conf 2024-11-14T09:31:17,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/java.io.tmpdir in system properties and HBase conf 2024-11-14T09:31:17,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:31:17,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T09:31:17,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T09:31:17,100 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:31:17,170 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:31:17,174 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:31:17,175 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:31:17,175 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:31:17,176 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:31:17,176 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:31:17,177 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e78de33{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:31:17,177 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d834c11{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:31:17,291 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3657796b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/java.io.tmpdir/jetty-localhost-40583-hadoop-hdfs-3_4_1-tests_jar-_-any-12664245089981950526/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:31:17,292 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b0e5af9{HTTP/1.1, (http/1.1)}{localhost:40583} 2024-11-14T09:31:17,292 INFO [Time-limited test {}] server.Server(415): Started @151478ms 2024-11-14T09:31:17,305 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:31:17,370 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:31:17,373 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:31:17,374 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:31:17,374 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:31:17,374 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:31:17,375 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c75e827{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:31:17,375 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46a4b15f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:31:17,488 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@36035128{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/java.io.tmpdir/jetty-localhost-36023-hadoop-hdfs-3_4_1-tests_jar-_-any-16242332291711149950/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:31:17,488 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3d69d5d5{HTTP/1.1, (http/1.1)}{localhost:36023} 2024-11-14T09:31:17,489 INFO [Time-limited test {}] server.Server(415): Started @151675ms 2024-11-14T09:31:17,490 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:31:17,530 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:31:17,533 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:31:17,534 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:31:17,534 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:31:17,534 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:31:17,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b572e4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:31:17,535 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@186da9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:31:17,596 WARN [Thread-1198 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/data/data2/current/BP-875957633-172.17.0.2-1731576677118/current, will proceed with Du for space computation calculation, 2024-11-14T09:31:17,596 WARN [Thread-1197 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/data/data1/current/BP-875957633-172.17.0.2-1731576677118/current, will proceed with Du for space computation calculation, 2024-11-14T09:31:17,612 WARN [Thread-1176 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:31:17,614 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x57bad0429ebb71fc with lease ID 0xeaa8b263883b3791: Processing first storage report for DS-b632735b-7758-4a07-82d2-9408c786a0d0 from datanode DatanodeRegistration(127.0.0.1:33525, datanodeUuid=200ac075-92e1-441f-a5ab-faef2c43db4c, infoPort=42497, infoSecurePort=0, ipcPort=38623, storageInfo=lv=-57;cid=testClusterID;nsid=745623789;c=1731576677118) 2024-11-14T09:31:17,614 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x57bad0429ebb71fc with lease ID 0xeaa8b263883b3791: from storage DS-b632735b-7758-4a07-82d2-9408c786a0d0 node DatanodeRegistration(127.0.0.1:33525, datanodeUuid=200ac075-92e1-441f-a5ab-faef2c43db4c, infoPort=42497, infoSecurePort=0, ipcPort=38623, storageInfo=lv=-57;cid=testClusterID;nsid=745623789;c=1731576677118), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:31:17,614 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x57bad0429ebb71fc with lease ID 0xeaa8b263883b3791: Processing first storage report for DS-f0ca00cd-fa32-435a-8053-46cebee01dae from datanode DatanodeRegistration(127.0.0.1:33525, datanodeUuid=200ac075-92e1-441f-a5ab-faef2c43db4c, infoPort=42497, infoSecurePort=0, ipcPort=38623, storageInfo=lv=-57;cid=testClusterID;nsid=745623789;c=1731576677118) 2024-11-14T09:31:17,614 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x57bad0429ebb71fc with lease ID 0xeaa8b263883b3791: from storage DS-f0ca00cd-fa32-435a-8053-46cebee01dae node DatanodeRegistration(127.0.0.1:33525, datanodeUuid=200ac075-92e1-441f-a5ab-faef2c43db4c, infoPort=42497, infoSecurePort=0, ipcPort=38623, storageInfo=lv=-57;cid=testClusterID;nsid=745623789;c=1731576677118), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:31:17,655 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55bd346a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/java.io.tmpdir/jetty-localhost-34325-hadoop-hdfs-3_4_1-tests_jar-_-any-9608533289437281052/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:31:17,655 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15ee1d5a{HTTP/1.1, (http/1.1)}{localhost:34325} 2024-11-14T09:31:17,656 INFO [Time-limited test {}] server.Server(415): Started @151842ms 2024-11-14T09:31:17,657 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T09:31:17,765 WARN [Thread-1223 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/data/data3/current/BP-875957633-172.17.0.2-1731576677118/current, will proceed with Du for space computation calculation, 2024-11-14T09:31:17,765 WARN [Thread-1224 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/data/data4/current/BP-875957633-172.17.0.2-1731576677118/current, will proceed with Du for space computation calculation, 2024-11-14T09:31:17,781 WARN [Thread-1212 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:31:17,784 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x95020d074f746aac with lease ID 0xeaa8b263883b3792: Processing first storage report for DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9 from datanode DatanodeRegistration(127.0.0.1:37137, datanodeUuid=e8426b3a-f727-4b4d-992e-3ca5f0bb3cdd, infoPort=33613, infoSecurePort=0, ipcPort=42175, storageInfo=lv=-57;cid=testClusterID;nsid=745623789;c=1731576677118) 2024-11-14T09:31:17,784 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x95020d074f746aac with lease ID 0xeaa8b263883b3792: from storage DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9 node DatanodeRegistration(127.0.0.1:37137, datanodeUuid=e8426b3a-f727-4b4d-992e-3ca5f0bb3cdd, infoPort=33613, infoSecurePort=0, ipcPort=42175, storageInfo=lv=-57;cid=testClusterID;nsid=745623789;c=1731576677118), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:31:17,784 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x95020d074f746aac with lease ID 0xeaa8b263883b3792: Processing first storage report for DS-612b11a0-9c2e-4e3b-9dc2-1bee62e764bd from datanode DatanodeRegistration(127.0.0.1:37137, datanodeUuid=e8426b3a-f727-4b4d-992e-3ca5f0bb3cdd, infoPort=33613, infoSecurePort=0, ipcPort=42175, storageInfo=lv=-57;cid=testClusterID;nsid=745623789;c=1731576677118) 2024-11-14T09:31:17,784 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x95020d074f746aac with lease ID 0xeaa8b263883b3792: from storage DS-612b11a0-9c2e-4e3b-9dc2-1bee62e764bd node DatanodeRegistration(127.0.0.1:37137, datanodeUuid=e8426b3a-f727-4b4d-992e-3ca5f0bb3cdd, infoPort=33613, infoSecurePort=0, ipcPort=42175, storageInfo=lv=-57;cid=testClusterID;nsid=745623789;c=1731576677118), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:31:17,786 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88 2024-11-14T09:31:17,789 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/zookeeper_0, clientPort=58079, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T09:31:17,789 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58079 2024-11-14T09:31:17,790 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:31:17,791 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:31:17,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37137 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:31:17,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33525 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:31:17,800 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8 with version=8 2024-11-14T09:31:17,801 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/hbase-staging 2024-11-14T09:31:17,803 INFO [Time-limited test {}] client.ConnectionUtils(128): master/83f56b55f2af:0 server-side Connection retries=45 2024-11-14T09:31:17,803 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:31:17,803 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:31:17,803 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:31:17,803 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:31:17,803 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:31:17,803 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T09:31:17,803 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:31:17,804 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35271 2024-11-14T09:31:17,805 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35271 connecting to ZooKeeper ensemble=127.0.0.1:58079 2024-11-14T09:31:17,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:352710x0, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:31:17,812 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35271-0x10115d1302c0000 connected 2024-11-14T09:31:17,830 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:31:17,832 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:31:17,834 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:31:17,834 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8, hbase.cluster.distributed=false 2024-11-14T09:31:17,836 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:31:17,836 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35271 2024-11-14T09:31:17,837 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35271 2024-11-14T09:31:17,837 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35271 2024-11-14T09:31:17,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35271 2024-11-14T09:31:17,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35271 2024-11-14T09:31:17,856 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/83f56b55f2af:0 server-side Connection retries=45 2024-11-14T09:31:17,856 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:31:17,856 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:31:17,856 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:31:17,856 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:31:17,856 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:31:17,856 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T09:31:17,857 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:31:17,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:31:17,857 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44797 2024-11-14T09:31:17,858 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44797 connecting to ZooKeeper ensemble=127.0.0.1:58079 2024-11-14T09:31:17,859 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:31:17,861 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:31:17,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:447970x0, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:31:17,865 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:447970x0, quorum=127.0.0.1:58079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:31:17,865 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44797-0x10115d1302c0001 connected 2024-11-14T09:31:17,865 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T09:31:17,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:17,868 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T09:31:17,868 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T09:31:17,869 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:31:17,872 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44797 2024-11-14T09:31:17,872 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44797 2024-11-14T09:31:17,872 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44797 2024-11-14T09:31:17,874 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44797 2024-11-14T09:31:17,874 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44797 2024-11-14T09:31:17,886 DEBUG [M:0;83f56b55f2af:35271 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;83f56b55f2af:35271 2024-11-14T09:31:17,887 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/83f56b55f2af,35271,1731576677802 2024-11-14T09:31:17,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:31:17,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:31:17,889 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/83f56b55f2af,35271,1731576677802 2024-11-14T09:31:17,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T09:31:17,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:17,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase 2024-11-14T09:31:17,891 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T09:31:17,892 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/83f56b55f2af,35271,1731576677802 from backup master directory 2024-11-14T09:31:17,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/83f56b55f2af,35271,1731576677802 2024-11-14T09:31:17,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:31:17,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:31:17,894 WARN [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:31:17,894 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=83f56b55f2af,35271,1731576677802 2024-11-14T09:31:17,898 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/hbase.id] with ID: 617799bd-c423-494f-86c3-485448819fee 2024-11-14T09:31:17,898 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/.tmp/hbase.id 2024-11-14T09:31:17,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37137 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:31:17,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33525 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:31:17,905 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/.tmp/hbase.id]:[hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/hbase.id] 2024-11-14T09:31:17,918 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:31:17,918 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T09:31:17,919 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-14T09:31:17,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:17,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:17,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33525 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:31:17,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37137 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:31:17,931 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:31:17,932 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T09:31:17,932 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:31:17,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37137 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:31:17,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33525 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:31:17,942 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store 2024-11-14T09:31:17,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37137 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:31:17,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33525 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:31:17,949 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:31:17,950 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:31:17,950 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:31:17,950 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:31:17,950 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:31:17,950 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:31:17,950 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T09:31:17,950 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731576677950Disabling compacts and flushes for region at 1731576677950Disabling writes for close at 1731576677950Writing region close event to WAL at 1731576677950Closed at 1731576677950 2024-11-14T09:31:17,950 WARN [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/.initializing 2024-11-14T09:31:17,951 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/WALs/83f56b55f2af,35271,1731576677802 2024-11-14T09:31:17,953 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C35271%2C1731576677802, suffix=, logDir=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/WALs/83f56b55f2af,35271,1731576677802, archiveDir=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/oldWALs, maxLogs=10 2024-11-14T09:31:17,954 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C35271%2C1731576677802.1731576677953 2024-11-14T09:31:17,961 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/WALs/83f56b55f2af,35271,1731576677802/83f56b55f2af%2C35271%2C1731576677802.1731576677953 2024-11-14T09:31:17,963 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33613:33613),(127.0.0.1/127.0.0.1:42497:42497)] 2024-11-14T09:31:17,964 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:31:17,964 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:31:17,964 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:17,964 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:17,968 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:17,970 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T09:31:17,970 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:17,970 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:31:17,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:17,972 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T09:31:17,972 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:17,973 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:31:17,973 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:17,974 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T09:31:17,974 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:17,974 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:31:17,975 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:17,976 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T09:31:17,976 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:17,976 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:31:17,976 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:17,977 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:17,978 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:17,979 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:17,979 DEBUG [master/83f56b55f2af:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:17,979 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T09:31:17,980 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:17,982 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:31:17,983 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=877853, jitterRate=0.11624845862388611}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T09:31:17,983 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731576677964Initializing all the Stores at 1731576677965 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576677965Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576677968 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576677968Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576677968Cleaning up temporary data from old regions at 1731576677979 (+11 ms)Region opened successfully at 1731576677983 (+4 ms) 2024-11-14T09:31:17,985 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T09:31:17,988 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cbb7499, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83f56b55f2af/172.17.0.2:0 2024-11-14T09:31:17,989 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T09:31:17,989 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T09:31:17,989 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T09:31:17,989 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T09:31:17,990 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T09:31:17,990 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T09:31:17,990 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T09:31:17,992 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T09:31:17,993 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T09:31:17,994 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T09:31:17,995 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T09:31:17,995 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T09:31:17,997 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T09:31:17,997 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T09:31:17,998 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T09:31:18,001 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T09:31:18,002 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T09:31:18,004 DEBUG 
[master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T09:31:18,006 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T09:31:18,007 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T09:31:18,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:31:18,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:31:18,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:18,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:18,010 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=83f56b55f2af,35271,1731576677802, sessionid=0x10115d1302c0000, setting cluster-up flag (Was=false) 2024-11-14T09:31:18,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:18,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:18,020 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T09:31:18,022 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=83f56b55f2af,35271,1731576677802 2024-11-14T09:31:18,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:18,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:18,032 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T09:31:18,033 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=83f56b55f2af,35271,1731576677802 2024-11-14T09:31:18,034 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T09:31:18,036 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T09:31:18,036 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T09:31:18,037 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T09:31:18,037 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 83f56b55f2af,35271,1731576677802 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T09:31:18,038 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:31:18,038 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:31:18,038 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:31:18,038 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:31:18,038 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/83f56b55f2af:0, corePoolSize=10, maxPoolSize=10 2024-11-14T09:31:18,038 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:18,038 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/83f56b55f2af:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:31:18,038 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/83f56b55f2af:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T09:31:18,039 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731576708039 2024-11-14T09:31:18,040 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T09:31:18,040 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T09:31:18,040 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T09:31:18,040 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T09:31:18,040 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T09:31:18,040 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T09:31:18,040 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:18,040 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:31:18,040 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T09:31:18,041 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:18,041 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T09:31:18,044 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T09:31:18,045 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T09:31:18,045 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T09:31:18,046 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T09:31:18,046 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T09:31:18,046 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576678046,5,FailOnTimeoutGroup] 2024-11-14T09:31:18,046 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576678046,5,FailOnTimeoutGroup] 2024-11-14T09:31:18,046 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:18,046 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T09:31:18,046 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:18,046 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
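The hbase:meta table descriptor dumped above (ROW_INDEX_V1 block encoding, ROWCOL bloom filter, IN_MEMORY, 8 KB block size per family) maps one-to-one onto the public descriptor builder API. Below is a minimal sketch of how an equivalent family/table descriptor could be assembled for an ordinary table; the coprocessor attribute shown in the log is left out, the table name demo:events and the single info family are placeholders, and this is not the bootstrap code FSTableDescriptors runs.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  // Builds a family shaped like the 'info' family printed in the log:
  // ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks, 3 versions.
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setMaxVersions(3)
        .build();

    // 'demo:events' is a placeholder table name; hbase:meta itself is created
    // by the master (and additionally carries the MultiRowMutationEndpoint
    // coprocessor attribute seen in the log), not by client code like this.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo:events"))
        .setColumnFamily(info)
        .build();
  }
}
```

Handing the resulting TableDescriptor to Admin.createTable(...) would create such a table on a running cluster.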
2024-11-14T09:31:18,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33525 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:31:18,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37137 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:31:18,055 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T09:31:18,055 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8 2024-11-14T09:31:18,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37137 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:31:18,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33525 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:31:18,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:31:18,066 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:31:18,068 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:31:18,068 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:18,068 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:31:18,068 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:31:18,070 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:31:18,070 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:18,070 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:31:18,070 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:31:18,071 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:31:18,071 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:18,072 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:31:18,072 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:31:18,073 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:31:18,073 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:18,074 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:31:18,074 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:31:18,074 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740 2024-11-14T09:31:18,075 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740 2024-11-14T09:31:18,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:31:18,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:31:18,076 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer(746): ClusterId : 617799bd-c423-494f-86c3-485448819fee 2024-11-14T09:31:18,076 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
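The FlushLargeStoresPolicy message just above reports its fallback: when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table, the per-family lower bound becomes the region memstore flush size divided by the number of column families. A small sketch of that arithmetic, assuming the fallback is a plain division (which is what the message and the figures in this log suggest); the 64 MB flush size for hbase:meta is inferred from the 16 MB result rather than printed directly.

```java
public class FlushLowerBoundSketch {
  // Rough sketch of the fallback described in the log message above.
  // Assumption: the lower bound is simply memstoreFlushSize / numberOfFamilies;
  // the exact HBase implementation is not reproduced here.
  static long perFamilyLowerBound(long memstoreFlushSize, int numberOfFamilies) {
    return memstoreFlushSize / numberOfFamilies;
  }

  public static void main(String[] args) {
    // master:store: 134217728 bytes (128 MB) flush size, 4 families (info, proc, rs, state) -> 32 MB
    System.out.println(perFamilyLowerBound(134_217_728L, 4)); // 33554432, the "32.0 M" in the log
    // hbase:meta: an inferred 64 MB flush size over 4 families (info, ns, rep_barrier, table) -> 16 MB
    System.out.println(perFamilyLowerBound(67_108_864L, 4));  // 16777216, the "16.0 M" in the log
  }
}
```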
2024-11-14T09:31:18,076 DEBUG [RS:0;83f56b55f2af:44797 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T09:31:18,078 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:31:18,079 DEBUG [RS:0;83f56b55f2af:44797 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T09:31:18,079 DEBUG [RS:0;83f56b55f2af:44797 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T09:31:18,080 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:31:18,080 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=880012, jitterRate=0.11899319291114807}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:31:18,081 DEBUG [RS:0;83f56b55f2af:44797 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T09:31:18,081 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731576678065Initializing all the Stores at 1731576678066 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576678066Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576678066Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576678066Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576678066Cleaning up temporary data from old regions at 1731576678076 (+10 ms)Region opened successfully at 1731576678081 (+5 ms) 2024-11-14T09:31:18,081 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:31:18,081 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:31:18,081 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:31:18,081 DEBUG [RS:0;83f56b55f2af:44797 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@507de358, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83f56b55f2af/172.17.0.2:0 2024-11-14T09:31:18,081 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:31:18,081 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:31:18,082 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:31:18,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731576678081Disabling compacts and flushes for region at 1731576678081Disabling writes for close at 1731576678081Writing region close event to WAL at 1731576678082 (+1 ms)Closed at 1731576678082 2024-11-14T09:31:18,083 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:31:18,083 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T09:31:18,083 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T09:31:18,085 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:31:18,086 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T09:31:18,094 DEBUG [RS:0;83f56b55f2af:44797 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;83f56b55f2af:44797 2024-11-14T09:31:18,094 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T09:31:18,094 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T09:31:18,094 DEBUG [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer(832): About to register with Master. 
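The earlier ZKUtil(444) messages for /hbase/balancer, /hbase/normalizer, the /hbase/switch/* nodes and /hbase/snapshot-cleanup only record that an optional znode does not exist yet. With a plain ZooKeeper client the same existence check looks roughly like the sketch below; the quorum string and paths are copied from the log, but this is an illustration, not HBase's ZKUtil.

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeCheckSketch {
  public static void main(String[] args) throws Exception {
    // Quorum taken from the log; the constructor requires a watcher, a no-op is fine here.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58079", 30_000, (WatchedEvent e) -> { });
    try {
      for (String path : new String[] {"/hbase/balancer", "/hbase/normalizer", "/hbase/switch/split"}) {
        Stat stat = zk.exists(path, false);
        // A null Stat is the "node does not exist (not necessarily an error)" case from the log.
        System.out.println(path + (stat == null ? " missing" : " present, version=" + stat.getVersion()));
      }
    } finally {
      zk.close();
    }
  }
}
```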
2024-11-14T09:31:18,095 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer(2659): reportForDuty to master=83f56b55f2af,35271,1731576677802 with port=44797, startcode=1731576677856 2024-11-14T09:31:18,095 DEBUG [RS:0;83f56b55f2af:44797 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T09:31:18,097 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38961, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T09:31:18,098 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35271 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 83f56b55f2af,44797,1731576677856 2024-11-14T09:31:18,098 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35271 {}] master.ServerManager(517): Registering regionserver=83f56b55f2af,44797,1731576677856 2024-11-14T09:31:18,099 DEBUG [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8 2024-11-14T09:31:18,099 DEBUG [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41755 2024-11-14T09:31:18,099 DEBUG [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T09:31:18,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:31:18,102 DEBUG [RS:0;83f56b55f2af:44797 {}] zookeeper.ZKUtil(111): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/83f56b55f2af,44797,1731576677856 2024-11-14T09:31:18,102 WARN [RS:0;83f56b55f2af:44797 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:31:18,102 INFO [RS:0;83f56b55f2af:44797 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:31:18,102 DEBUG [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856 2024-11-14T09:31:18,102 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [83f56b55f2af,44797,1731576677856] 2024-11-14T09:31:18,105 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T09:31:18,107 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T09:31:18,107 INFO [RS:0;83f56b55f2af:44797 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:31:18,107 INFO [RS:0;83f56b55f2af:44797 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
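The PressureAwareCompactionThroughputController and MemStoreFlusher lines above are driven by a handful of site settings. A hedged sketch of the corresponding configuration follows; the key names are assumptions recalled from the compaction-throughput and memstore documentation and should be verified against the HBase version in use (3.0.0-beta-2 here).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ThroughputTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Bounds the throughput controller tunes between (the log shows 100 MB/s and 50 MB/s).
    // Key names are assumptions to verify; values are bytes per second.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    // Tuning period, 60000 ms in the log (assumed key name).
    conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);
    // Fraction of the heap usable by memstores; the 880 MB / 836 MB figures in the log
    // are this fraction (and its low-water mark) applied to the test JVM's heap.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    return conf;
  }
}
```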
2024-11-14T09:31:18,108 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T09:31:18,109 INFO [RS:0;83f56b55f2af:44797 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T09:31:18,109 INFO [RS:0;83f56b55f2af:44797 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:18,109 DEBUG [RS:0;83f56b55f2af:44797 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:18,109 DEBUG [RS:0;83f56b55f2af:44797 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:18,109 DEBUG [RS:0;83f56b55f2af:44797 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:18,109 DEBUG [RS:0;83f56b55f2af:44797 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:18,109 DEBUG [RS:0;83f56b55f2af:44797 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:18,109 DEBUG [RS:0;83f56b55f2af:44797 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/83f56b55f2af:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:31:18,109 DEBUG [RS:0;83f56b55f2af:44797 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:18,109 DEBUG [RS:0;83f56b55f2af:44797 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:18,109 DEBUG [RS:0;83f56b55f2af:44797 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:18,110 DEBUG [RS:0;83f56b55f2af:44797 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:18,110 DEBUG [RS:0;83f56b55f2af:44797 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:18,110 DEBUG [RS:0;83f56b55f2af:44797 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:18,110 DEBUG [RS:0;83f56b55f2af:44797 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/83f56b55f2af:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:31:18,110 DEBUG [RS:0;83f56b55f2af:44797 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:31:18,111 INFO [RS:0;83f56b55f2af:44797 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-14T09:31:18,111 INFO [RS:0;83f56b55f2af:44797 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:18,111 INFO [RS:0;83f56b55f2af:44797 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:18,111 INFO [RS:0;83f56b55f2af:44797 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:18,111 INFO [RS:0;83f56b55f2af:44797 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:18,111 INFO [RS:0;83f56b55f2af:44797 {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,44797,1731576677856-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:31:18,126 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T09:31:18,126 INFO [RS:0;83f56b55f2af:44797 {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,44797,1731576677856-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:18,126 INFO [RS:0;83f56b55f2af:44797 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:18,126 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.Replication(171): 83f56b55f2af,44797,1731576677856 started 2024-11-14T09:31:18,140 INFO [RS:0;83f56b55f2af:44797 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:18,140 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer(1482): Serving as 83f56b55f2af,44797,1731576677856, RpcServer on 83f56b55f2af/172.17.0.2:44797, sessionid=0x10115d1302c0001 2024-11-14T09:31:18,140 DEBUG [RS:0;83f56b55f2af:44797 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T09:31:18,140 DEBUG [RS:0;83f56b55f2af:44797 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 83f56b55f2af,44797,1731576677856 2024-11-14T09:31:18,140 DEBUG [RS:0;83f56b55f2af:44797 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83f56b55f2af,44797,1731576677856' 2024-11-14T09:31:18,140 DEBUG [RS:0;83f56b55f2af:44797 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T09:31:18,141 DEBUG [RS:0;83f56b55f2af:44797 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T09:31:18,141 DEBUG [RS:0;83f56b55f2af:44797 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T09:31:18,141 DEBUG [RS:0;83f56b55f2af:44797 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T09:31:18,141 DEBUG [RS:0;83f56b55f2af:44797 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 83f56b55f2af,44797,1731576677856 2024-11-14T09:31:18,141 DEBUG [RS:0;83f56b55f2af:44797 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83f56b55f2af,44797,1731576677856' 2024-11-14T09:31:18,141 DEBUG [RS:0;83f56b55f2af:44797 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T09:31:18,142 DEBUG 
[RS:0;83f56b55f2af:44797 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T09:31:18,142 DEBUG [RS:0;83f56b55f2af:44797 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T09:31:18,142 INFO [RS:0;83f56b55f2af:44797 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T09:31:18,142 INFO [RS:0;83f56b55f2af:44797 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T09:31:18,236 WARN [83f56b55f2af:35271 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T09:31:18,244 INFO [RS:0;83f56b55f2af:44797 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C44797%2C1731576677856, suffix=, logDir=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856, archiveDir=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/oldWALs, maxLogs=32 2024-11-14T09:31:18,245 INFO [RS:0;83f56b55f2af:44797 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44797%2C1731576677856.1731576678245 2024-11-14T09:31:18,251 INFO [RS:0;83f56b55f2af:44797 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576678245 2024-11-14T09:31:18,252 DEBUG [RS:0;83f56b55f2af:44797 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33613:33613),(127.0.0.1/127.0.0.1:42497:42497)] 2024-11-14T09:31:18,486 DEBUG [83f56b55f2af:35271 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T09:31:18,487 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=83f56b55f2af,44797,1731576677856 2024-11-14T09:31:18,488 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 83f56b55f2af,44797,1731576677856, state=OPENING 2024-11-14T09:31:18,490 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T09:31:18,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:18,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:18,492 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:31:18,492 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:31:18,492 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:31:18,492 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=83f56b55f2af,44797,1731576677856}] 2024-11-14T09:31:18,645 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T09:31:18,647 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50095, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T09:31:18,651 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T09:31:18,651 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:31:18,652 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C44797%2C1731576677856.meta, suffix=.meta, logDir=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856, archiveDir=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/oldWALs, maxLogs=32 2024-11-14T09:31:18,653 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44797%2C1731576677856.meta.1731576678653.meta 2024-11-14T09:31:18,659 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.meta.1731576678653.meta 2024-11-14T09:31:18,663 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42497:42497),(127.0.0.1/127.0.0.1:33613:33613)] 2024-11-14T09:31:18,664 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:31:18,664 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T09:31:18,664 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T09:31:18,664 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
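In the AbstractFSWAL(613) lines above, rollsize is not configured separately: it is blocksize multiplied by the log-roll multiplier, and 256 MB x 0.5 = 128 MB matches the printout. Below is a sketch of how those values could be read back from configuration; the key names and defaults are assumptions to check against the running version.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalSizingSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key names; defaults here mirror the figures printed in the log.
    long blocksize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
    // rollsize = blocksize * multiplier: 256 MB * 0.5 = 128 MB, matching the WAL configuration line.
    long rollsize = (long) (blocksize * multiplier);
    System.out.printf("blocksize=%d rollsize=%d maxLogs=%d%n", blocksize, rollsize, maxLogs);
  }
}
```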
2024-11-14T09:31:18,664 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T09:31:18,664 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:31:18,664 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T09:31:18,664 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T09:31:18,666 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:31:18,666 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:31:18,666 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:18,667 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:31:18,667 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:31:18,667 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:31:18,668 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:18,668 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:31:18,668 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:31:18,669 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:31:18,669 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:18,669 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:31:18,669 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:31:18,670 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:31:18,670 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:18,670 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
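The repeated CompactionConfiguration(183) dumps (one per column family) echo the standard compaction settings: minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, a 128 MB minCompactSize, and a 7-day major-compaction period with 0.5 jitter. The sketch below sets the same values explicitly through the client Configuration purely for illustration; the cluster in this log is simply running the defaults.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static Configuration defaults() {
    Configuration conf = HBaseConfiguration.create();
    // Values copied from the CompactionConfiguration(183) dump above.
    conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period, 7 days
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter
    return conf;
  }
}
```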
2024-11-14T09:31:18,670 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:31:18,671 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740 2024-11-14T09:31:18,672 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740 2024-11-14T09:31:18,673 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:31:18,673 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:31:18,674 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T09:31:18,675 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:31:18,676 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=880956, jitterRate=0.12019392848014832}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:31:18,676 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T09:31:18,677 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731576678664Writing region info on filesystem at 1731576678664Initializing all the Stores at 1731576678665 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576678665Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576678665Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576678665Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576678665Cleaning up temporary data from old regions at 1731576678673 (+8 ms)Running coprocessor post-open hooks at 1731576678676 (+3 ms)Region opened successfully at 1731576678677 (+1 ms) 2024-11-14T09:31:18,678 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731576678645 2024-11-14T09:31:18,680 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T09:31:18,680 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T09:31:18,681 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=83f56b55f2af,44797,1731576677856 2024-11-14T09:31:18,683 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 83f56b55f2af,44797,1731576677856, state=OPEN 2024-11-14T09:31:18,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:31:18,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:31:18,690 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:31:18,690 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:31:18,690 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=83f56b55f2af,44797,1731576677856 2024-11-14T09:31:18,693 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T09:31:18,693 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=83f56b55f2af,44797,1731576677856 in 198 msec 2024-11-14T09:31:18,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T09:31:18,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 610 msec 2024-11-14T09:31:18,696 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:31:18,696 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T09:31:18,697 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:31:18,697 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=83f56b55f2af,44797,1731576677856, seqNum=-1] 2024-11-14T09:31:18,698 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:31:18,699 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52291, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:31:18,704 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 667 msec 2024-11-14T09:31:18,704 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731576678704, completionTime=-1 2024-11-14T09:31:18,704 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T09:31:18,704 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T09:31:18,706 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T09:31:18,706 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731576738706 2024-11-14T09:31:18,706 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731576798706 2024-11-14T09:31:18,706 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-14T09:31:18,706 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,35271,1731576677802-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:18,706 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,35271,1731576677802-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:18,706 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,35271,1731576677802-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:18,706 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-83f56b55f2af:35271, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T09:31:18,706 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:18,706 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:18,708 DEBUG [master/83f56b55f2af:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T09:31:18,710 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.816sec 2024-11-14T09:31:18,710 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T09:31:18,710 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T09:31:18,710 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T09:31:18,710 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T09:31:18,710 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T09:31:18,710 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,35271,1731576677802-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:31:18,710 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,35271,1731576677802-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T09:31:18,712 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T09:31:18,712 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T09:31:18,712 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,35271,1731576677802-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T09:31:18,777 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57a169e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:31:18,777 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 83f56b55f2af,35271,-1 for getting cluster id 2024-11-14T09:31:18,777 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T09:31:18,779 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '617799bd-c423-494f-86c3-485448819fee' 2024-11-14T09:31:18,779 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T09:31:18,779 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "617799bd-c423-494f-86c3-485448819fee" 2024-11-14T09:31:18,779 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b8a8083, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:31:18,779 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [83f56b55f2af,35271,-1] 2024-11-14T09:31:18,779 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T09:31:18,780 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:31:18,781 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49048, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T09:31:18,782 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7271f19d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:31:18,782 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:31:18,783 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=83f56b55f2af,44797,1731576677856, seqNum=-1] 2024-11-14T09:31:18,783 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:31:18,785 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42750, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:31:18,786 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=83f56b55f2af,35271,1731576677802 2024-11-14T09:31:18,786 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:31:18,789 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T09:31:18,789 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-14T09:31:18,789 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-14T09:31:18,789 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T09:31:18,790 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 83f56b55f2af,35271,1731576677802 2024-11-14T09:31:18,790 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@e76c628 2024-11-14T09:31:18,790 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T09:31:18,791 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49054, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T09:31:18,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35271 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T09:31:18,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35271 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-14T09:31:18,792 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35271 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:31:18,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35271 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T09:31:18,795 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T09:31:18,795 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:18,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35271 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-14T09:31:18,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35271 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T09:31:18,796 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T09:31:18,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33525 is added to blk_1073741835_1011 (size=395) 2024-11-14T09:31:18,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37137 is added to blk_1073741835_1011 (size=395) 2024-11-14T09:31:18,804 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cdabcd37ef79a147784768f5283ec7f0, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8 2024-11-14T09:31:18,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33525 is added to blk_1073741836_1012 (size=78) 2024-11-14T09:31:18,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37137 is added to blk_1073741836_1012 (size=78) 2024-11-14T09:31:18,810 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:31:18,810 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing cdabcd37ef79a147784768f5283ec7f0, disabling compactions & flushes 2024-11-14T09:31:18,810 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0. 2024-11-14T09:31:18,810 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0. 2024-11-14T09:31:18,810 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0. after waiting 0 ms 2024-11-14T09:31:18,810 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0. 2024-11-14T09:31:18,810 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0. 2024-11-14T09:31:18,811 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for cdabcd37ef79a147784768f5283ec7f0: Waiting for close lock at 1731576678810Disabling compacts and flushes for region at 1731576678810Disabling writes for close at 1731576678810Writing region close event to WAL at 1731576678810Closed at 1731576678810 2024-11-14T09:31:18,812 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T09:31:18,812 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731576678812"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731576678812"}]},"ts":"1731576678812"} 2024-11-14T09:31:18,815 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-14T09:31:18,816 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T09:31:18,816 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731576678816"}]},"ts":"1731576678816"} 2024-11-14T09:31:18,818 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-14T09:31:18,819 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=cdabcd37ef79a147784768f5283ec7f0, ASSIGN}] 2024-11-14T09:31:18,820 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=cdabcd37ef79a147784768f5283ec7f0, ASSIGN 2024-11-14T09:31:18,821 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=cdabcd37ef79a147784768f5283ec7f0, ASSIGN; state=OFFLINE, location=83f56b55f2af,44797,1731576677856; forceNewPlan=false, retain=false 2024-11-14T09:31:18,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:18,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:31:18,972 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cdabcd37ef79a147784768f5283ec7f0, regionState=OPENING, regionLocation=83f56b55f2af,44797,1731576677856 2024-11-14T09:31:18,974 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=cdabcd37ef79a147784768f5283ec7f0, ASSIGN because future has completed 2024-11-14T09:31:18,975 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cdabcd37ef79a147784768f5283ec7f0, server=83f56b55f2af,44797,1731576677856}] 2024-11-14T09:31:19,132 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0. 2024-11-14T09:31:19,132 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => cdabcd37ef79a147784768f5283ec7f0, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:31:19,132 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart cdabcd37ef79a147784768f5283ec7f0 2024-11-14T09:31:19,132 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:31:19,132 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for cdabcd37ef79a147784768f5283ec7f0 2024-11-14T09:31:19,132 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for cdabcd37ef79a147784768f5283ec7f0 2024-11-14T09:31:19,134 INFO [StoreOpener-cdabcd37ef79a147784768f5283ec7f0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region cdabcd37ef79a147784768f5283ec7f0 2024-11-14T09:31:19,135 INFO [StoreOpener-cdabcd37ef79a147784768f5283ec7f0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cdabcd37ef79a147784768f5283ec7f0 columnFamilyName info 2024-11-14T09:31:19,135 DEBUG [StoreOpener-cdabcd37ef79a147784768f5283ec7f0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:19,135 INFO [StoreOpener-cdabcd37ef79a147784768f5283ec7f0-1 {}] regionserver.HStore(327): Store=cdabcd37ef79a147784768f5283ec7f0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:31:19,136 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for cdabcd37ef79a147784768f5283ec7f0 2024-11-14T09:31:19,136 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/default/TestLogRolling-testLogRollOnPipelineRestart/cdabcd37ef79a147784768f5283ec7f0 2024-11-14T09:31:19,136 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/default/TestLogRolling-testLogRollOnPipelineRestart/cdabcd37ef79a147784768f5283ec7f0 2024-11-14T09:31:19,137 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for cdabcd37ef79a147784768f5283ec7f0 2024-11-14T09:31:19,137 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for cdabcd37ef79a147784768f5283ec7f0 2024-11-14T09:31:19,138 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for cdabcd37ef79a147784768f5283ec7f0 2024-11-14T09:31:19,140 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/default/TestLogRolling-testLogRollOnPipelineRestart/cdabcd37ef79a147784768f5283ec7f0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:31:19,141 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened cdabcd37ef79a147784768f5283ec7f0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689244, jitterRate=-0.12358105182647705}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T09:31:19,141 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cdabcd37ef79a147784768f5283ec7f0 2024-11-14T09:31:19,141 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for cdabcd37ef79a147784768f5283ec7f0: Running coprocessor pre-open hook at 1731576679132Writing region info on filesystem at 1731576679132Initializing all the Stores at 1731576679133 (+1 
ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576679133Cleaning up temporary data from old regions at 1731576679137 (+4 ms)Running coprocessor post-open hooks at 1731576679141 (+4 ms)Region opened successfully at 1731576679141 2024-11-14T09:31:19,142 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0., pid=6, masterSystemTime=1731576679128 2024-11-14T09:31:19,144 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0. 2024-11-14T09:31:19,145 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0. 2024-11-14T09:31:19,145 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cdabcd37ef79a147784768f5283ec7f0, regionState=OPEN, openSeqNum=2, regionLocation=83f56b55f2af,44797,1731576677856 2024-11-14T09:31:19,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cdabcd37ef79a147784768f5283ec7f0, server=83f56b55f2af,44797,1731576677856 because future has completed 2024-11-14T09:31:19,152 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T09:31:19,152 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure cdabcd37ef79a147784768f5283ec7f0, server=83f56b55f2af,44797,1731576677856 in 174 msec 2024-11-14T09:31:19,154 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T09:31:19,154 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=cdabcd37ef79a147784768f5283ec7f0, ASSIGN in 333 msec 2024-11-14T09:31:19,155 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T09:31:19,155 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731576679155"}]},"ts":"1731576679155"} 2024-11-14T09:31:19,157 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-14T09:31:19,158 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; 
CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T09:31:19,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 366 msec 2024-11-14T09:31:19,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:19,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:20,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:20,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:31:21,793 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T09:31:21,794 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T09:31:21,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T09:31:21,794 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-14T09:31:21,795 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:31:21,795 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T09:31:21,859 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:21,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:22,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:22,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:23,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:23,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:24,166 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:31:24,180 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:24,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:24,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:24,182 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:24,182 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:24,182 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:24,185 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:24,185 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:24,185 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:24,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:24,191 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T09:31:24,192 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-14T09:31:24,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:24,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:25,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:25,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:26,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:26,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:27,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:27,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:28,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:28,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-14T09:31:28,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35271 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-14T09:31:28,893 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-14T09:31:28,893 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100
2024-11-14T09:31:28,897 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-14T09:31:28,897 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0.
2024-11-14T09:31:28,900 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0., hostname=83f56b55f2af,44797,1731576677856, seqNum=2]
2024-11-14T09:31:29,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:29,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:30,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:30,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-14T09:31:30,903 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576678245
2024-11-14T09:31:30,904 WARN [ResponseProcessor for block BP-875957633-172.17.0.2-1731576677118:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-875957633-172.17.0.2-1731576677118:blk_1073741830_1006
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T09:31:30,904 WARN [ResponseProcessor for block BP-875957633-172.17.0.2-1731576677118:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-875957633-172.17.0.2-1731576677118:blk_1073741833_1009
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T09:31:30,904 WARN [ResponseProcessor for block BP-875957633-172.17.0.2-1731576677118:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-875957633-172.17.0.2-1731576677118:blk_1073741834_1010
java.io.IOException: Bad response ERROR for BP-875957633-172.17.0.2-1731576677118:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:37137,DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T09:31:30,904 WARN [DataStreamer for file /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/WALs/83f56b55f2af,35271,1731576677802/83f56b55f2af%2C35271%2C1731576677802.1731576677953 block BP-875957633-172.17.0.2-1731576677118:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-875957633-172.17.0.2-1731576677118:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37137,DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9,DISK], DatanodeInfoWithStorage[127.0.0.1:33525,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37137,DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9,DISK]) is bad.
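The WARN entries repeated roughly once per second above (09:31:21 through 09:31:30) all come from the same place: after a WAL writer is closed, HBase's lease-recovery helper keeps asking the NameNode whether the old WAL file is closed yet. The fragment below is a minimal, hypothetical sketch of that retry loop, not the actual RecoverLeaseFSUtils code; it only assumes the public DistributedFileSystem.recoverLease(Path) and isFileClosed(Path) calls that appear in the stack traces.

```java
// Hypothetical, simplified sketch of the lease-recovery loop behind the repeated
// "Failed invocation for hdfs://..." warnings above: ask the NameNode to recover the
// lease on the old WAL, then poll isFileClosed() until it reports the file closed.
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  public static boolean recoverLease(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    boolean recovered = false;
    while (!recovered && System.currentTimeMillis() < deadline) {
      try {
        // Ask the NameNode to recover the lease; returns true once the file is closed.
        recovered = dfs.recoverLease(wal);
        if (!recovered) {
          // Between attempts, poll whether the file has been closed in the meantime.
          recovered = dfs.isFileClosed(wal);
        }
      } catch (IOException e) {
        // A shut-down DFSClient throws IOException("Filesystem closed") here; the real
        // code logs the WARN seen above and retries about once per second.
      }
      if (!recovered) {
        Thread.sleep(1000L);
      }
    }
    return recovered;
  }
}
```

In this run the retries can never succeed: the DFSClient behind the filesystem handle has already been shut down, so every poll fails with the "Filesystem closed" IOException shown in each Caused by block.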
2024-11-14T09:31:30,904 WARN [DataStreamer for file /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576678245 block BP-875957633-172.17.0.2-1731576677118:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-875957633-172.17.0.2-1731576677118:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37137,DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9,DISK], DatanodeInfoWithStorage[127.0.0.1:33525,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37137,DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9,DISK]) is bad. 2024-11-14T09:31:30,905 WARN [DataStreamer for file /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.meta.1731576678653.meta block BP-875957633-172.17.0.2-1731576677118:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-875957633-172.17.0.2-1731576677118:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33525,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK], DatanodeInfoWithStorage[127.0.0.1:37137,DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37137,DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9,DISK]) is bad. 2024-11-14T09:31:30,905 WARN [PacketResponder: BP-875957633-172.17.0.2-1731576677118:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37137] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
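The burst of ResponseProcessor and DataStreamer errors at 09:31:30, followed below by the DataNode's Jetty web app stopping and a new one starting, matches what testLogRollOnPipelineRestart exercises: the HDFS write pipeline under the live WALs is broken by bouncing the DataNodes, and the client is left to recover (or the WAL is rolled). The sketch below is only an illustration of how a test can provoke this against a MiniDFSCluster; the cluster handle and the way it is obtained are assumptions, not the test's actual code.

```java
// Hypothetical illustration: break the HDFS write pipeline under open WAL files by
// restarting the mini cluster's DataNodes, then wait for them to re-register. The
// DFSClient streaming the WAL blocks then sees the EOF / "datanode ... is bad"
// errors logged above and must run pipeline error recovery.
import org.apache.hadoop.hdfs.MiniDFSCluster;

public final class PipelineRestartSketch {
  public static void bounceDataNodes(MiniDFSCluster dfsCluster) throws Exception {
    dfsCluster.restartDataNodes(); // stop and restart every DataNode in place
    dfsCluster.waitActive();       // block until the restarted DataNodes are live again
  }
}
```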
2024-11-14T09:31:30,905 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-403344938_22 at /127.0.0.1:54908 [Receiving block BP-875957633-172.17.0.2-1731576677118:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37137:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54908 dst: /127.0.0.1:37137 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:30,905 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1970735064_22 at /127.0.0.1:59380 [Receiving block BP-875957633-172.17.0.2-1731576677118:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33525:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59380 dst: /127.0.0.1:33525 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:30,905 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-403344938_22 at /127.0.0.1:59336 [Receiving block BP-875957633-172.17.0.2-1731576677118:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33525:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59336 dst: /127.0.0.1:33525 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:30,905 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1970735064_22 at /127.0.0.1:54938 [Receiving block BP-875957633-172.17.0.2-1731576677118:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37137:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54938 dst: /127.0.0.1:37137 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:30,905 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1970735064_22 at /127.0.0.1:54950 [Receiving block BP-875957633-172.17.0.2-1731576677118:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37137:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54950 dst: /127.0.0.1:37137 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:30,906 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1970735064_22 at /127.0.0.1:59364 [Receiving block BP-875957633-172.17.0.2-1731576677118:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33525:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59364 dst: /127.0.0.1:33525 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:30,907 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55bd346a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:31:30,907 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15ee1d5a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:31:30,908 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:31:30,908 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@186da9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:31:30,908 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b572e4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.log.dir/,STOPPED} 2024-11-14T09:31:30,909 WARN [BP-875957633-172.17.0.2-1731576677118 heartbeating to localhost/127.0.0.1:41755 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:31:30,909 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:31:30,909 WARN [BP-875957633-172.17.0.2-1731576677118 heartbeating to localhost/127.0.0.1:41755 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-875957633-172.17.0.2-1731576677118 (Datanode Uuid e8426b3a-f727-4b4d-992e-3ca5f0bb3cdd) service to localhost/127.0.0.1:41755 2024-11-14T09:31:30,909 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:31:30,909 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/data/data3/current/BP-875957633-172.17.0.2-1731576677118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:31:30,910 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/data/data4/current/BP-875957633-172.17.0.2-1731576677118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:31:30,910 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:31:30,923 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:31:30,926 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:31:30,927 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:31:30,927 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:31:30,927 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:31:30,927 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@158e3255{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:31:30,928 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b1c1354{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:31:31,041 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7164f4a6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/java.io.tmpdir/jetty-localhost-35779-hadoop-hdfs-3_4_1-tests_jar-_-any-2855837838924267187/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:31:31,041 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47af5110{HTTP/1.1, 
(http/1.1)}{localhost:35779} 2024-11-14T09:31:31,041 INFO [Time-limited test {}] server.Server(415): Started @165228ms 2024-11-14T09:31:31,043 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:31:31,065 WARN [ResponseProcessor for block BP-875957633-172.17.0.2-1731576677118:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-875957633-172.17.0.2-1731576677118:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:31,065 WARN [ResponseProcessor for block BP-875957633-172.17.0.2-1731576677118:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-875957633-172.17.0.2-1731576677118:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:31,065 WARN [ResponseProcessor for block BP-875957633-172.17.0.2-1731576677118:blk_1073741833_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-875957633-172.17.0.2-1731576677118:blk_1073741833_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:31,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1970735064_22 at /127.0.0.1:49140 [Receiving block BP-875957633-172.17.0.2-1731576677118:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33525:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49140 dst: /127.0.0.1:33525 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:31,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1970735064_22 at /127.0.0.1:49154 [Receiving block BP-875957633-172.17.0.2-1731576677118:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33525:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49154 dst: /127.0.0.1:33525 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:31,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-403344938_22 at /127.0.0.1:49158 [Receiving block BP-875957633-172.17.0.2-1731576677118:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33525:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49158 dst: /127.0.0.1:33525 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:31:31,069 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@36035128{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:31:31,070 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d69d5d5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:31:31,070 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:31:31,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46a4b15f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:31:31,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c75e827{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.log.dir/,STOPPED} 2024-11-14T09:31:31,071 WARN [BP-875957633-172.17.0.2-1731576677118 heartbeating to localhost/127.0.0.1:41755 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:31:31,071 WARN [BP-875957633-172.17.0.2-1731576677118 heartbeating to localhost/127.0.0.1:41755 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-875957633-172.17.0.2-1731576677118 (Datanode Uuid 200ac075-92e1-441f-a5ab-faef2c43db4c) service to localhost/127.0.0.1:41755 2024-11-14T09:31:31,072 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T09:31:31,072 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:31:31,073 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/data/data1/current/BP-875957633-172.17.0.2-1731576677118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:31:31,073 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/data/data2/current/BP-875957633-172.17.0.2-1731576677118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:31:31,073 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:31:31,083 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:31:31,086 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:31:31,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:31:31,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:31:31,087 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:31:31,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@569c23a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:31:31,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5938f3f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:31:31,165 WARN [Thread-1347 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:31:31,168 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe464e7f1380407ab with lease ID 0xeaa8b263883b3793: from storage DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9 node DatanodeRegistration(127.0.0.1:44347, datanodeUuid=e8426b3a-f727-4b4d-992e-3ca5f0bb3cdd, infoPort=33589, infoSecurePort=0, ipcPort=42403, storageInfo=lv=-57;cid=testClusterID;nsid=745623789;c=1731576677118), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:31:31,168 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe464e7f1380407ab with lease ID 0xeaa8b263883b3793: from storage DS-612b11a0-9c2e-4e3b-9dc2-1bee62e764bd node DatanodeRegistration(127.0.0.1:44347, datanodeUuid=e8426b3a-f727-4b4d-992e-3ca5f0bb3cdd, infoPort=33589, infoSecurePort=0, ipcPort=42403, storageInfo=lv=-57;cid=testClusterID;nsid=745623789;c=1731576677118), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:31:31,235 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c0079ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/java.io.tmpdir/jetty-localhost-38627-hadoop-hdfs-3_4_1-tests_jar-_-any-3915713704513250971/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:31:31,235 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3b16a{HTTP/1.1, (http/1.1)}{localhost:38627} 2024-11-14T09:31:31,235 INFO [Time-limited test {}] server.Server(415): Started @165421ms 2024-11-14T09:31:31,236 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T09:31:31,317 WARN [Thread-1378 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:31:31,320 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6c8943f6c372445 with lease ID 0xeaa8b263883b3794: from storage DS-b632735b-7758-4a07-82d2-9408c786a0d0 node DatanodeRegistration(127.0.0.1:44227, datanodeUuid=200ac075-92e1-441f-a5ab-faef2c43db4c, infoPort=35407, infoSecurePort=0, ipcPort=45037, storageInfo=lv=-57;cid=testClusterID;nsid=745623789;c=1731576677118), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:31:31,321 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6c8943f6c372445 with lease ID 0xeaa8b263883b3794: from storage DS-f0ca00cd-fa32-435a-8053-46cebee01dae node DatanodeRegistration(127.0.0.1:44227, datanodeUuid=200ac075-92e1-441f-a5ab-faef2c43db4c, infoPort=35407, infoSecurePort=0, ipcPort=45037, storageInfo=lv=-57;cid=testClusterID;nsid=745623789;c=1731576677118), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T09:31:31,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:31:31,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:32,258 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-14T09:31:32,260 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-14T09:31:32,262 ERROR [FSHLog-0-hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8-prefix:83f56b55f2af,44797,1731576677856 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33525,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:31:32,262 WARN [FSHLog-0-hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8-prefix:83f56b55f2af,44797,1731576677856 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33525,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:32,262 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 83f56b55f2af%2C44797%2C1731576677856:(num 1731576678245) roll requested 2024-11-14T09:31:32,263 INFO [regionserver/83f56b55f2af:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44797%2C1731576677856.1731576692262 2024-11-14T09:31:32,268 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576678245 newFile=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 2024-11-14T09:31:32,268 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:32,268 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:32,269 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:32,269 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:32,269 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:32,269 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576678245 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 2024-11-14T09:31:32,269 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33525,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:31:32,269 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33525,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:32,269 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576678245 2024-11-14T09:31:32,270 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35407:35407),(127.0.0.1/127.0.0.1:33589:33589)] 2024-11-14T09:31:32,270 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576678245 is not closed yet, will try archiving it next time 2024-11-14T09:31:32,270 WARN [IPC Server handler 4 on default port 41755 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576678245 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1013 2024-11-14T09:31:32,270 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576678245 after 1ms 2024-11-14T09:31:32,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44227 is added to blk_1073741833_1017 (size=1632) 2024-11-14T09:31:32,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:32,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:33,169 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1013: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-14T09:31:33,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:33,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:31:34,273 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-14T09:31:34,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:34,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:35,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:35,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:36,271 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576678245 after 4002ms 2024-11-14T09:31:36,276 WARN [ResponseProcessor for block BP-875957633-172.17.0.2-1731576677118:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-875957633-172.17.0.2-1731576677118:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:36,276 WARN [DataStreamer for file /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 block BP-875957633-172.17.0.2-1731576677118:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-875957633-172.17.0.2-1731576677118:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44227,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK], DatanodeInfoWithStorage[127.0.0.1:44347,DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44227,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK]) is bad. 2024-11-14T09:31:36,276 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1970735064_22 at /127.0.0.1:46942 [Receiving block BP-875957633-172.17.0.2-1731576677118:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:44347:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46942 dst: /127.0.0.1:44347 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:36,276 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1970735064_22 at /127.0.0.1:41806 [Receiving block BP-875957633-172.17.0.2-1731576677118:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:44227:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41806 dst: /127.0.0.1:44227 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:36,278 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c0079ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:31:36,278 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3b16a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:31:36,278 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:31:36,278 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5938f3f8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:31:36,279 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@569c23a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.log.dir/,STOPPED} 2024-11-14T09:31:36,280 WARN [BP-875957633-172.17.0.2-1731576677118 heartbeating to localhost/127.0.0.1:41755 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:31:36,280 WARN [BP-875957633-172.17.0.2-1731576677118 heartbeating to localhost/127.0.0.1:41755 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-875957633-172.17.0.2-1731576677118 (Datanode Uuid 200ac075-92e1-441f-a5ab-faef2c43db4c) service to localhost/127.0.0.1:41755 2024-11-14T09:31:36,280 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:31:36,280 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:31:36,281 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/data/data1/current/BP-875957633-172.17.0.2-1731576677118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:31:36,281 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/data/data2/current/BP-875957633-172.17.0.2-1731576677118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:31:36,281 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:31:36,289 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:31:36,291 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:31:36,292 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:31:36,292 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:31:36,292 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:31:36,292 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c2647c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:31:36,292 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a87b508{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:31:36,406 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b49eb41{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/java.io.tmpdir/jetty-localhost-34079-hadoop-hdfs-3_4_1-tests_jar-_-any-12369774673362020241/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:31:36,407 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7e766a12{HTTP/1.1, (http/1.1)}{localhost:34079} 2024-11-14T09:31:36,407 INFO [Time-limited test {}] server.Server(415): Started @170593ms 2024-11-14T09:31:36,408 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T09:31:36,429 WARN [ResponseProcessor for block BP-875957633-172.17.0.2-1731576677118:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-875957633-172.17.0.2-1731576677118:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:36,429 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1970735064_22 at /127.0.0.1:46954 [Receiving block BP-875957633-172.17.0.2-1731576677118:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:44347:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46954 dst: /127.0.0.1:44347 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:31:36,435 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7164f4a6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:31:36,435 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47af5110{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:31:36,435 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:31:36,435 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b1c1354{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:31:36,435 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@158e3255{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.log.dir/,STOPPED} 2024-11-14T09:31:36,436 WARN [BP-875957633-172.17.0.2-1731576677118 heartbeating to localhost/127.0.0.1:41755 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:31:36,436 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T09:31:36,436 WARN [BP-875957633-172.17.0.2-1731576677118 heartbeating to localhost/127.0.0.1:41755 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-875957633-172.17.0.2-1731576677118 (Datanode Uuid e8426b3a-f727-4b4d-992e-3ca5f0bb3cdd) service to localhost/127.0.0.1:41755 2024-11-14T09:31:36,437 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:31:36,439 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/data/data3/current/BP-875957633-172.17.0.2-1731576677118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:31:36,439 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/data/data4/current/BP-875957633-172.17.0.2-1731576677118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:31:36,440 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:31:36,450 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:31:36,453 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:31:36,457 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:31:36,457 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:31:36,457 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:31:36,457 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33b10502{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:31:36,458 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b789ca1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:31:36,495 WARN [Thread-1421 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:31:36,497 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3cbfe4c8d76b4ee with lease ID 0xeaa8b263883b3795: from storage DS-b632735b-7758-4a07-82d2-9408c786a0d0 node DatanodeRegistration(127.0.0.1:42901, datanodeUuid=200ac075-92e1-441f-a5ab-faef2c43db4c, infoPort=38459, infoSecurePort=0, ipcPort=35699, storageInfo=lv=-57;cid=testClusterID;nsid=745623789;c=1731576677118), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:31:36,498 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3cbfe4c8d76b4ee with lease ID 0xeaa8b263883b3795: from storage DS-f0ca00cd-fa32-435a-8053-46cebee01dae node DatanodeRegistration(127.0.0.1:42901, datanodeUuid=200ac075-92e1-441f-a5ab-faef2c43db4c, infoPort=38459, infoSecurePort=0, ipcPort=35699, storageInfo=lv=-57;cid=testClusterID;nsid=745623789;c=1731576677118), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T09:31:36,574 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@364c89a9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/java.io.tmpdir/jetty-localhost-37529-hadoop-hdfs-3_4_1-tests_jar-_-any-14821364143577299288/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:31:36,574 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@53853b60{HTTP/1.1, (http/1.1)}{localhost:37529} 2024-11-14T09:31:36,574 INFO [Time-limited test {}] server.Server(415): Started @170760ms 2024-11-14T09:31:36,576 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T09:31:36,662 WARN [Thread-1452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:31:36,665 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2d81bed330308e13 with lease ID 0xeaa8b263883b3796: from storage DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9 node DatanodeRegistration(127.0.0.1:46357, datanodeUuid=e8426b3a-f727-4b4d-992e-3ca5f0bb3cdd, infoPort=40577, infoSecurePort=0, ipcPort=38187, storageInfo=lv=-57;cid=testClusterID;nsid=745623789;c=1731576677118), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:31:36,665 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2d81bed330308e13 with lease ID 0xeaa8b263883b3796: from storage DS-612b11a0-9c2e-4e3b-9dc2-1bee62e764bd node DatanodeRegistration(127.0.0.1:46357, datanodeUuid=e8426b3a-f727-4b4d-992e-3ca5f0bb3cdd, infoPort=40577, infoSecurePort=0, ipcPort=38187, storageInfo=lv=-57;cid=testClusterID;nsid=745623789;c=1731576677118), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:31:36,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:31:36,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:37,594 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-14T09:31:37,596 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-14T09:31:37,597 ERROR [FSHLog-0-hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8-prefix:83f56b55f2af,44797,1731576677856 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44347,DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:31:37,597 WARN [FSHLog-0-hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8-prefix:83f56b55f2af,44797,1731576677856 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44347,DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:37,598 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 83f56b55f2af%2C44797%2C1731576677856:(num 1731576692262) roll requested 2024-11-14T09:31:37,598 INFO [regionserver/83f56b55f2af:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44797%2C1731576677856.1731576697598 2024-11-14T09:31:37,603 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 newFile=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576697598 2024-11-14T09:31:37,603 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:37,603 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:37,603 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:37,604 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:37,604 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:37,604 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576697598 2024-11-14T09:31:37,604 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44347,DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:31:37,604 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44347,DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:37,604 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 2024-11-14T09:31:37,605 WARN [IPC Server handler 1 on default port 41755 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-14T09:31:37,605 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 after 1ms 2024-11-14T09:31:37,607 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38459:38459),(127.0.0.1/127.0.0.1:40577:40577)] 2024-11-14T09:31:37,607 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 is not closed yet, will try archiving it next time 2024-11-14T09:31:37,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:37,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:31:38,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:38,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:39,608 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44797%2C1731576677856.1731576699608 2024-11-14T09:31:39,613 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576697598 newFile=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 2024-11-14T09:31:39,614 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:39,614 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:39,614 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:39,614 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:39,614 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:39,614 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576697598 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 2024-11-14T09:31:39,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741838_1019 (size=1264) 2024-11-14T09:31:39,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741838_1019 (size=1264) 2024-11-14T09:31:39,617 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 is not closed yet, will try archiving it next time 2024-11-14T09:31:39,617 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38459:38459),(127.0.0.1/127.0.0.1:40577:40577)] 2024-11-14T09:31:39,617 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 is not closed yet, will try archiving it next time 2024-11-14T09:31:39,617 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576678245 2024-11-14T09:31:39,617 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576678245 2024-11-14T09:31:39,618 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576678245 after 1ms 2024-11-14T09:31:39,618 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576678245 2024-11-14T09:31:39,627 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731576679141/Put/vlen=218/seqid=0] 2024-11-14T09:31:39,627 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731576688902/Put/vlen=1045/seqid=0] 2024-11-14T09:31:39,628 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576678245 2024-11-14T09:31:39,628 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 2024-11-14T09:31:39,628 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 2024-11-14T09:31:39,628 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 after 0ms 2024-11-14T09:31:39,628 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 2024-11-14T09:31:39,631 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731576692262/Put/vlen=1045/seqid=0] 2024-11-14T09:31:39,631 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731576694274/Put/vlen=1045/seqid=0] 2024-11-14T09:31:39,631 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 2024-11-14T09:31:39,631 DEBUG [Time-limited test {}] wal.TestLogRolling(403): 
recovering lease for hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576697598 2024-11-14T09:31:39,631 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576697598 2024-11-14T09:31:39,632 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576697598 after 1ms 2024-11-14T09:31:39,632 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576697598 2024-11-14T09:31:39,635 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731576697597/Put/vlen=1045/seqid=0] 2024-11-14T09:31:39,635 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 2024-11-14T09:31:39,635 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 2024-11-14T09:31:39,635 WARN [IPC Server handler 2 on default port 41755 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-14T09:31:39,635 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 after 0ms 2024-11-14T09:31:39,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:39,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:40,499 WARN [ResponseProcessor for block BP-875957633-172.17.0.2-1731576677118:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-875957633-172.17.0.2-1731576677118:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:40,499 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-403344938_22 at /127.0.0.1:52642 [Receiving block BP-875957633-172.17.0.2-1731576677118:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52642 dst: /127.0.0.1:42901 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:42901 remote=/127.0.0.1:52642]. Total timeout mills is 60000, 59114 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:40,500 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-403344938_22 at /127.0.0.1:34524 [Receiving block BP-875957633-172.17.0.2-1731576677118:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:46357:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34524 dst: /127.0.0.1:46357 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:31:40,500 WARN [DataStreamer for file /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 block BP-875957633-172.17.0.2-1731576677118:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-875957633-172.17.0.2-1731576677118:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42901,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK], DatanodeInfoWithStorage[127.0.0.1:46357,DS-9ea4179b-ad22-4a7e-b55e-76544f1dc7b9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42901,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK]) is bad. 
2024-11-14T09:31:40,501 WARN [DataStreamer for file /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 block BP-875957633-172.17.0.2-1731576677118:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-875957633-172.17.0.2-1731576677118:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:31:40,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741839_1022 (size=85) 2024-11-14T09:31:40,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741839_1022 (size=85) 2024-11-14T09:31:40,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:40,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:41,498 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-14T09:31:41,606 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576692262 after 4002ms 2024-11-14T09:31:41,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:41,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:42,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:42,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:31:43,636 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 after 4001ms 2024-11-14T09:31:43,636 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 2024-11-14T09:31:43,639 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 2024-11-14T09:31:43,640 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-14T09:31:43,640 ERROR [FSHLog-0-hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8-prefix:83f56b55f2af,44797,1731576677856.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33525,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:43,640 WARN [FSHLog-0-hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8-prefix:83f56b55f2af,44797,1731576677856.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33525,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:31:43,641 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 83f56b55f2af%2C44797%2C1731576677856.meta:.meta(num 1731576678653) roll requested 2024-11-14T09:31:43,641 INFO [regionserver/83f56b55f2af:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44797%2C1731576677856.meta.1731576703641.meta 2024-11-14T09:31:43,646 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:43,646 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:43,646 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:43,646 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:43,646 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:43,646 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.meta.1731576678653.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.meta.1731576703641.meta 2024-11-14T09:31:43,647 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33525,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:43,647 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33525,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:31:43,647 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.meta.1731576678653.meta 2024-11-14T09:31:43,648 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38459:38459),(127.0.0.1/127.0.0.1:40577:40577)] 2024-11-14T09:31:43,648 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.meta.1731576678653.meta is not closed yet, will try archiving it next time 2024-11-14T09:31:43,648 WARN [IPC Server handler 4 on default port 41755 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.meta.1731576678653.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-11-14T09:31:43,648 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.meta.1731576678653.meta after 1ms 2024-11-14T09:31:43,663 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/.tmp/info/bd5d0cd01c6c4630aa6e38b050b3866d is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0./info:regioninfo/1731576679145/Put/seqid=0 2024-11-14T09:31:43,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741841_1025 (size=7125) 2024-11-14T09:31:43,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741841_1025 (size=7125) 2024-11-14T09:31:43,669 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/.tmp/info/bd5d0cd01c6c4630aa6e38b050b3866d 2024-11-14T09:31:43,688 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/.tmp/ns/70df41fb40cb4e4db973b1fadd956224 is 43, key is default/ns:d/1731576678699/Put/seqid=0 2024-11-14T09:31:43,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741842_1026 (size=5153) 2024-11-14T09:31:43,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741842_1026 (size=5153) 2024-11-14T09:31:43,694 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/.tmp/ns/70df41fb40cb4e4db973b1fadd956224 2024-11-14T09:31:43,712 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/.tmp/table/d50093349e1e4f18930461b3be3ac844 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731576679155/Put/seqid=0 2024-11-14T09:31:43,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741843_1027 (size=5438) 2024-11-14T09:31:43,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741843_1027 (size=5438) 2024-11-14T09:31:43,717 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/.tmp/table/d50093349e1e4f18930461b3be3ac844 2024-11-14T09:31:43,723 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/.tmp/info/bd5d0cd01c6c4630aa6e38b050b3866d as hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/info/bd5d0cd01c6c4630aa6e38b050b3866d 2024-11-14T09:31:43,727 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/info/bd5d0cd01c6c4630aa6e38b050b3866d, entries=10, sequenceid=11, filesize=7.0 K 2024-11-14T09:31:43,728 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/.tmp/ns/70df41fb40cb4e4db973b1fadd956224 as hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/ns/70df41fb40cb4e4db973b1fadd956224 2024-11-14T09:31:43,733 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/ns/70df41fb40cb4e4db973b1fadd956224, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T09:31:43,734 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/.tmp/table/d50093349e1e4f18930461b3be3ac844 as hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/table/d50093349e1e4f18930461b3be3ac844 2024-11-14T09:31:43,739 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/table/d50093349e1e4f18930461b3be3ac844, entries=2, sequenceid=11, filesize=5.3 K 2024-11-14T09:31:43,740 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false 2024-11-14T09:31:43,741 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-14T09:31:43,741 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing cdabcd37ef79a147784768f5283ec7f0 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-14T09:31:43,741 ERROR [FSHLog-0-hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8-prefix:83f56b55f2af,44797,1731576677856 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-875957633-172.17.0.2-1731576677118:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:43,741 WARN [FSHLog-0-hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8-prefix:83f56b55f2af,44797,1731576677856 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-875957633-172.17.0.2-1731576677118:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:31:43,742 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 83f56b55f2af%2C44797%2C1731576677856:(num 1731576699608) roll requested 2024-11-14T09:31:43,742 INFO [regionserver/83f56b55f2af:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44797%2C1731576677856.1731576703742 2024-11-14T09:31:43,747 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 newFile=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576703742 2024-11-14T09:31:43,747 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:43,747 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:43,747 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:43,747 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:43,747 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:43,747 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576703742 2024-11-14T09:31:43,748 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-875957633-172.17.0.2-1731576677118:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:43,748 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40577:40577),(127.0.0.1/127.0.0.1:38459:38459)] 2024-11-14T09:31:43,748 DEBUG [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 is not closed yet, will try archiving it next time 2024-11-14T09:31:43,748 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-875957633-172.17.0.2-1731576677118:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:31:43,748 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 2024-11-14T09:31:43,749 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 after 1ms 2024-11-14T09:31:43,750 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.1731576699608 to hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/oldWALs/83f56b55f2af%2C44797%2C1731576677856.1731576699608 2024-11-14T09:31:43,766 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/default/TestLogRolling-testLogRollOnPipelineRestart/cdabcd37ef79a147784768f5283ec7f0/.tmp/info/68f637437743453a976bef233fad8092 is 1080, key is row1002/info:/1731576688902/Put/seqid=0 2024-11-14T09:31:43,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741845_1029 (size=9270) 2024-11-14T09:31:43,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741845_1029 (size=9270) 2024-11-14T09:31:43,772 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/default/TestLogRolling-testLogRollOnPipelineRestart/cdabcd37ef79a147784768f5283ec7f0/.tmp/info/68f637437743453a976bef233fad8092 2024-11-14T09:31:43,777 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/default/TestLogRolling-testLogRollOnPipelineRestart/cdabcd37ef79a147784768f5283ec7f0/.tmp/info/68f637437743453a976bef233fad8092 as hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/default/TestLogRolling-testLogRollOnPipelineRestart/cdabcd37ef79a147784768f5283ec7f0/info/68f637437743453a976bef233fad8092 2024-11-14T09:31:43,782 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/default/TestLogRolling-testLogRollOnPipelineRestart/cdabcd37ef79a147784768f5283ec7f0/info/68f637437743453a976bef233fad8092, entries=4, sequenceid=8, filesize=9.1 K 2024-11-14T09:31:43,783 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for cdabcd37ef79a147784768f5283ec7f0 in 42ms, sequenceid=8, compaction requested=false 2024-11-14T09:31:43,783 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for cdabcd37ef79a147784768f5283ec7f0: 2024-11-14T09:31:43,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T09:31:43,789 INFO [Time-limited test {}] 
client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T09:31:43,789 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:31:43,789 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:31:43,789 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:31:43,789 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-14T09:31:43,790 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T09:31:43,790 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=26903972, stopped=false 2024-11-14T09:31:43,790 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=83f56b55f2af,35271,1731576677802 2024-11-14T09:31:43,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:31:43,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:31:43,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:43,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:43,792 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:31:43,792 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T09:31:43,792 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:31:43,792 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:31:43,793 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '83f56b55f2af,44797,1731576677856' ***** 2024-11-14T09:31:43,793 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T09:31:43,793 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:31:43,793 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:31:43,793 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T09:31:43,793 INFO [RS:0;83f56b55f2af:44797 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T09:31:43,793 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T09:31:43,793 INFO [RS:0;83f56b55f2af:44797 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T09:31:43,793 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer(3091): Received CLOSE for cdabcd37ef79a147784768f5283ec7f0 2024-11-14T09:31:43,794 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer(959): stopping server 83f56b55f2af,44797,1731576677856 2024-11-14T09:31:43,794 INFO [RS:0;83f56b55f2af:44797 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:31:43,794 INFO [RS:0;83f56b55f2af:44797 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;83f56b55f2af:44797. 
2024-11-14T09:31:43,794 DEBUG [RS:0;83f56b55f2af:44797 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:31:43,794 DEBUG [RS:0;83f56b55f2af:44797 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:31:43,794 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing cdabcd37ef79a147784768f5283ec7f0, disabling compactions & flushes 2024-11-14T09:31:43,794 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T09:31:43,794 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T09:31:43,794 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T09:31:43,794 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0. 2024-11-14T09:31:43,794 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T09:31:43,794 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0. 2024-11-14T09:31:43,794 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0. after waiting 0 ms 2024-11-14T09:31:43,794 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0. 
2024-11-14T09:31:43,794 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T09:31:43,794 DEBUG [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, cdabcd37ef79a147784768f5283ec7f0=TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0.} 2024-11-14T09:31:43,794 DEBUG [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, cdabcd37ef79a147784768f5283ec7f0 2024-11-14T09:31:43,794 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:31:43,794 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:31:43,794 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:31:43,794 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:31:43,794 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:31:43,799 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/default/TestLogRolling-testLogRollOnPipelineRestart/cdabcd37ef79a147784768f5283ec7f0/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-14T09:31:43,799 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T09:31:43,800 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0. 2024-11-14T09:31:43,800 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for cdabcd37ef79a147784768f5283ec7f0: Waiting for close lock at 1731576703794Running coprocessor pre-close hooks at 1731576703794Disabling compacts and flushes for region at 1731576703794Disabling writes for close at 1731576703794Writing region close event to WAL at 1731576703795 (+1 ms)Running coprocessor post-close hooks at 1731576703800 (+5 ms)Closed at 1731576703800 2024-11-14T09:31:43,800 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:31:43,800 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731576678792.cdabcd37ef79a147784768f5283ec7f0. 
2024-11-14T09:31:43,800 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:31:43,800 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731576703794Running coprocessor pre-close hooks at 1731576703794Disabling compacts and flushes for region at 1731576703794Disabling writes for close at 1731576703794Writing region close event to WAL at 1731576703796 (+2 ms)Running coprocessor post-close hooks at 1731576703800 (+4 ms)Closed at 1731576703800 2024-11-14T09:31:43,800 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T09:31:43,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:43,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:43,995 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer(976): stopping server 83f56b55f2af,44797,1731576677856; all regions closed. 
2024-11-14T09:31:43,995 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:43,995 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:43,995 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:43,995 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:43,996 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:43,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741840_1023 (size=825) 2024-11-14T09:31:43,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741840_1023 (size=825) 2024-11-14T09:31:44,112 INFO [regionserver/83f56b55f2af:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:31:44,154 INFO [regionserver/83f56b55f2af:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T09:31:44,155 INFO [regionserver/83f56b55f2af:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T09:31:44,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:31:44,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:45,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:45,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:31:46,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:46,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:47,649 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.meta.1731576678653.meta after 4002ms 2024-11-14T09:31:47,649 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/WALs/83f56b55f2af,44797,1731576677856/83f56b55f2af%2C44797%2C1731576677856.meta.1731576678653.meta to hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/oldWALs/83f56b55f2af%2C44797%2C1731576677856.meta.1731576678653.meta 2024-11-14T09:31:47,652 DEBUG [RS:0;83f56b55f2af:44797 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/oldWALs 2024-11-14T09:31:47,652 INFO [RS:0;83f56b55f2af:44797 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 83f56b55f2af%2C44797%2C1731576677856.meta:.meta(num 1731576703641) 2024-11-14T09:31:47,652 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:47,652 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:47,652 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:47,653 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:47,653 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:47,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741844_1028 (size=1162) 2024-11-14T09:31:47,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741844_1028 (size=1162) 2024-11-14T09:31:47,658 DEBUG [RS:0;83f56b55f2af:44797 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/oldWALs 2024-11-14T09:31:47,658 INFO [RS:0;83f56b55f2af:44797 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 83f56b55f2af%2C44797%2C1731576677856:(num 1731576703742) 2024-11-14T09:31:47,659 DEBUG [RS:0;83f56b55f2af:44797 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:31:47,659 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:31:47,659 INFO [RS:0;83f56b55f2af:44797 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:31:47,659 INFO [RS:0;83f56b55f2af:44797 {}] hbase.ChoreService(370): Chore service for: regionserver/83f56b55f2af:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T09:31:47,659 INFO [RS:0;83f56b55f2af:44797 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:31:47,659 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:31:47,659 INFO [RS:0;83f56b55f2af:44797 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44797 2024-11-14T09:31:47,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/83f56b55f2af,44797,1731576677856 2024-11-14T09:31:47,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:31:47,661 INFO [RS:0;83f56b55f2af:44797 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:31:47,663 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [83f56b55f2af,44797,1731576677856] 2024-11-14T09:31:47,665 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-14T09:31:47,666 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/83f56b55f2af,44797,1731576677856 already deleted, retry=false 2024-11-14T09:31:47,666 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 83f56b55f2af,44797,1731576677856 expired; onlineServers=0 2024-11-14T09:31:47,666 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '83f56b55f2af,35271,1731576677802' ***** 2024-11-14T09:31:47,666 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T09:31:47,666 INFO [M:0;83f56b55f2af:35271 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:31:47,666 INFO [M:0;83f56b55f2af:35271 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:31:47,666 DEBUG [M:0;83f56b55f2af:35271 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T09:31:47,667 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-14T09:31:47,667 DEBUG [M:0;83f56b55f2af:35271 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T09:31:47,667 DEBUG [master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576678046 {}] cleaner.HFileCleaner(306): Exit Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576678046,5,FailOnTimeoutGroup] 2024-11-14T09:31:47,667 DEBUG [master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576678046 {}] cleaner.HFileCleaner(306): Exit Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576678046,5,FailOnTimeoutGroup] 2024-11-14T09:31:47,667 INFO [M:0;83f56b55f2af:35271 {}] hbase.ChoreService(370): Chore service for: master/83f56b55f2af:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T09:31:47,667 INFO [M:0;83f56b55f2af:35271 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:31:47,667 DEBUG [M:0;83f56b55f2af:35271 {}] master.HMaster(1795): Stopping service threads 2024-11-14T09:31:47,667 INFO [M:0;83f56b55f2af:35271 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T09:31:47,667 INFO [M:0;83f56b55f2af:35271 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:31:47,667 INFO [M:0;83f56b55f2af:35271 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T09:31:47,667 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-14T09:31:47,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T09:31:47,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:47,668 DEBUG [M:0;83f56b55f2af:35271 {}] zookeeper.ZKUtil(347): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T09:31:47,668 WARN [M:0;83f56b55f2af:35271 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T09:31:47,669 INFO [M:0;83f56b55f2af:35271 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/.lastflushedseqids 2024-11-14T09:31:47,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741846_1030 (size=139) 2024-11-14T09:31:47,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741846_1030 (size=139) 2024-11-14T09:31:47,674 INFO [M:0;83f56b55f2af:35271 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T09:31:47,675 INFO [M:0;83f56b55f2af:35271 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T09:31:47,675 DEBUG [M:0;83f56b55f2af:35271 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:31:47,675 INFO [M:0;83f56b55f2af:35271 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:31:47,675 DEBUG [M:0;83f56b55f2af:35271 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:31:47,675 DEBUG [M:0;83f56b55f2af:35271 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:31:47,675 DEBUG [M:0;83f56b55f2af:35271 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:31:47,675 INFO [M:0;83f56b55f2af:35271 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-14T09:31:47,675 ERROR [FSHLog-0-hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData-prefix:83f56b55f2af,35271,1731576677802 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33525,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:47,675 WARN [FSHLog-0-hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData-prefix:83f56b55f2af,35271,1731576677802 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33525,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:47,676 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 83f56b55f2af%2C35271%2C1731576677802:(num 1731576677953) roll requested 2024-11-14T09:31:47,676 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C35271%2C1731576677802.1731576707676 2024-11-14T09:31:47,680 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:47,680 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:47,680 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:47,681 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:47,681 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:47,681 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/WALs/83f56b55f2af,35271,1731576677802/83f56b55f2af%2C35271%2C1731576677802.1731576677953 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/WALs/83f56b55f2af,35271,1731576677802/83f56b55f2af%2C35271%2C1731576677802.1731576707676 2024-11-14T09:31:47,681 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33525,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:47,681 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33525,DS-b632735b-7758-4a07-82d2-9408c786a0d0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:31:47,681 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/WALs/83f56b55f2af,35271,1731576677802/83f56b55f2af%2C35271%2C1731576677802.1731576677953 2024-11-14T09:31:47,681 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38459:38459),(127.0.0.1/127.0.0.1:40577:40577)] 2024-11-14T09:31:47,682 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/WALs/83f56b55f2af,35271,1731576677802/83f56b55f2af%2C35271%2C1731576677802.1731576677953 is not closed yet, will try archiving it next time 2024-11-14T09:31:47,682 WARN [IPC Server handler 1 on default port 41755 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/WALs/83f56b55f2af,35271,1731576677802/83f56b55f2af%2C35271%2C1731576677802.1731576677953 has not been closed. Lease recovery is in progress. 
RecoveryId = 1032 for block blk_1073741830_1015 2024-11-14T09:31:47,682 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/WALs/83f56b55f2af,35271,1731576677802/83f56b55f2af%2C35271%2C1731576677802.1731576677953 after 1ms 2024-11-14T09:31:47,701 DEBUG [M:0;83f56b55f2af:35271 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f5edbc321f334f2a8fd341c73612b1ff is 82, key is hbase:meta,,1/info:regioninfo/1731576678681/Put/seqid=0 2024-11-14T09:31:47,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741848_1033 (size=5672) 2024-11-14T09:31:47,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741848_1033 (size=5672) 2024-11-14T09:31:47,707 INFO [M:0;83f56b55f2af:35271 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f5edbc321f334f2a8fd341c73612b1ff 2024-11-14T09:31:47,729 DEBUG [M:0;83f56b55f2af:35271 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/66f1acea56a34e15a7455b3cd95356a3 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731576679159/Put/seqid=0 2024-11-14T09:31:47,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741849_1034 (size=6119) 2024-11-14T09:31:47,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741849_1034 (size=6119) 2024-11-14T09:31:47,734 INFO [M:0;83f56b55f2af:35271 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/66f1acea56a34e15a7455b3cd95356a3 2024-11-14T09:31:47,753 DEBUG [M:0;83f56b55f2af:35271 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e7c4b55aa77348cd9c854163be794cfb is 69, key is 83f56b55f2af,44797,1731576677856/rs:state/1731576678098/Put/seqid=0 2024-11-14T09:31:47,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741850_1035 (size=5156) 2024-11-14T09:31:47,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741850_1035 (size=5156) 2024-11-14T09:31:47,758 INFO [M:0;83f56b55f2af:35271 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e7c4b55aa77348cd9c854163be794cfb 2024-11-14T09:31:47,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:31:47,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44797-0x10115d1302c0001, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:31:47,763 INFO [RS:0;83f56b55f2af:44797 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:31:47,763 INFO [RS:0;83f56b55f2af:44797 {}] regionserver.HRegionServer(1031): Exiting; stopping=83f56b55f2af,44797,1731576677856; zookeeper connection closed. 2024-11-14T09:31:47,764 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4eec3e70 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4eec3e70 2024-11-14T09:31:47,764 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T09:31:47,777 DEBUG [M:0;83f56b55f2af:35271 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/21dafa7ad3304924a5ba0179a6bf1f12 is 52, key is load_balancer_on/state:d/1731576678788/Put/seqid=0 2024-11-14T09:31:47,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741851_1036 (size=5056) 2024-11-14T09:31:47,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741851_1036 (size=5056) 2024-11-14T09:31:47,782 INFO [M:0;83f56b55f2af:35271 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/21dafa7ad3304924a5ba0179a6bf1f12 2024-11-14T09:31:47,786 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-14T09:31:47,787 DEBUG [M:0;83f56b55f2af:35271 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f5edbc321f334f2a8fd341c73612b1ff as hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f5edbc321f334f2a8fd341c73612b1ff 2024-11-14T09:31:47,791 INFO [M:0;83f56b55f2af:35271 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f5edbc321f334f2a8fd341c73612b1ff, entries=8, sequenceid=56, filesize=5.5 K 2024-11-14T09:31:47,791 DEBUG [M:0;83f56b55f2af:35271 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/66f1acea56a34e15a7455b3cd95356a3 as hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/66f1acea56a34e15a7455b3cd95356a3 2024-11-14T09:31:47,795 INFO [M:0;83f56b55f2af:35271 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/66f1acea56a34e15a7455b3cd95356a3, entries=6, sequenceid=56, filesize=6.0 K 2024-11-14T09:31:47,796 DEBUG [M:0;83f56b55f2af:35271 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e7c4b55aa77348cd9c854163be794cfb as hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e7c4b55aa77348cd9c854163be794cfb 2024-11-14T09:31:47,800 INFO [M:0;83f56b55f2af:35271 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e7c4b55aa77348cd9c854163be794cfb, entries=1, sequenceid=56, filesize=5.0 K 2024-11-14T09:31:47,801 DEBUG [M:0;83f56b55f2af:35271 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/21dafa7ad3304924a5ba0179a6bf1f12 as hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/21dafa7ad3304924a5ba0179a6bf1f12 2024-11-14T09:31:47,804 INFO [M:0;83f56b55f2af:35271 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/21dafa7ad3304924a5ba0179a6bf1f12, entries=1, sequenceid=56, filesize=4.9 K 2024-11-14T09:31:47,805 INFO [M:0;83f56b55f2af:35271 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=56, compaction requested=false 2024-11-14T09:31:47,807 INFO 
[M:0;83f56b55f2af:35271 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:31:47,807 DEBUG [M:0;83f56b55f2af:35271 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731576707675Disabling compacts and flushes for region at 1731576707675Disabling writes for close at 1731576707675Obtaining lock to block concurrent updates at 1731576707675Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731576707675Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731576707675Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731576707682 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731576707682Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731576707701 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731576707701Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731576707715 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731576707728 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731576707728Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731576707738 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731576707753 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731576707753Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731576707763 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731576707776 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731576707776Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@779ff1a2: reopening flushed file at 1731576707786 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f338284: reopening flushed file at 1731576707791 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50ce2935: reopening flushed file at 1731576707795 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a22b653: reopening flushed file at 1731576707800 (+5 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=56, compaction requested=false at 1731576707805 (+5 ms)Writing region close event to WAL at 1731576707806 (+1 ms)Closed at 1731576707806 2024-11-14T09:31:47,807 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:47,807 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:47,807 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:47,807 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:47,807 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:31:47,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46357 is added to blk_1073741847_1031 (size=757) 2024-11-14T09:31:47,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42901 is added to blk_1073741847_1031 (size=757) 2024-11-14T09:31:47,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:47,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:48,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:48,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:48,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:48,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:48,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:48,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:48,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:48,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:48,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:48,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:48,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:48,818 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:48,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:48,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:48,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:48,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:49,323 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:31:49,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:49,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:49,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:49,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:49,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:49,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:49,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:49,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:49,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:49,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:49,343 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:49,343 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:49,343 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:49,345 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:49,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:49,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:50,666 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-14T09:31:50,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:50,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:51,683 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/WALs/83f56b55f2af,35271,1731576677802/83f56b55f2af%2C35271%2C1731576677802.1731576677953 after 4001ms 2024-11-14T09:31:51,683 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/WALs/83f56b55f2af,35271,1731576677802/83f56b55f2af%2C35271%2C1731576677802.1731576677953 to hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/oldWALs/83f56b55f2af%2C35271%2C1731576677802.1731576677953 2024-11-14T09:31:51,685 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/MasterData/oldWALs/83f56b55f2af%2C35271%2C1731576677802.1731576677953 to hdfs://localhost:41755/user/jenkins/test-data/ca2c6eb1-733a-3abb-775a-39323973e8f8/oldWALs/83f56b55f2af%2C35271%2C1731576677802.1731576677953$masterlocalwal$ 2024-11-14T09:31:51,685 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:31:51,685 INFO [M:0;83f56b55f2af:35271 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-14T09:31:51,686 INFO [M:0;83f56b55f2af:35271 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35271 2024-11-14T09:31:51,686 INFO [M:0;83f56b55f2af:35271 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:31:51,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:31:51,788 INFO [M:0;83f56b55f2af:35271 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:31:51,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35271-0x10115d1302c0000, quorum=127.0.0.1:58079, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:31:51,790 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@364c89a9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:31:51,790 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@53853b60{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:31:51,790 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:31:51,791 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b789ca1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:31:51,791 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33b10502{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.log.dir/,STOPPED} 2024-11-14T09:31:51,792 WARN [BP-875957633-172.17.0.2-1731576677118 heartbeating to localhost/127.0.0.1:41755 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:31:51,792 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:31:51,792 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:31:51,792 WARN [BP-875957633-172.17.0.2-1731576677118 heartbeating to localhost/127.0.0.1:41755 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-875957633-172.17.0.2-1731576677118 (Datanode Uuid e8426b3a-f727-4b4d-992e-3ca5f0bb3cdd) service to localhost/127.0.0.1:41755 2024-11-14T09:31:51,793 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/data/data3/current/BP-875957633-172.17.0.2-1731576677118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:31:51,793 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/data/data4/current/BP-875957633-172.17.0.2-1731576677118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:31:51,793 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:31:51,793 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:31:51,793 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T09:31:51,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T09:31:51,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T09:31:51,795 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b49eb41{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:31:51,795 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7e766a12{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:31:51,795 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:31:51,796 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a87b508{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:31:51,796 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c2647c4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.log.dir/,STOPPED} 2024-11-14T09:31:51,797 WARN [BP-875957633-172.17.0.2-1731576677118 heartbeating to 
localhost/127.0.0.1:41755 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:31:51,797 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T09:31:51,797 WARN [BP-875957633-172.17.0.2-1731576677118 heartbeating to localhost/127.0.0.1:41755 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-875957633-172.17.0.2-1731576677118 (Datanode Uuid 200ac075-92e1-441f-a5ab-faef2c43db4c) service to localhost/127.0.0.1:41755 2024-11-14T09:31:51,797 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:31:51,797 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/data/data1/current/BP-875957633-172.17.0.2-1731576677118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:31:51,797 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/cluster_930a5f69-a91f-75e3-0e17-a0ac7c392208/data/data2/current/BP-875957633-172.17.0.2-1731576677118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:31:51,798 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:31:51,803 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3657796b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:31:51,804 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4b0e5af9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:31:51,804 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:31:51,804 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d834c11{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:31:51,804 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e78de33{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.log.dir/,STOPPED} 2024-11-14T09:31:51,811 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T09:31:51,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T09:31:51,837 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 155) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41755 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41755 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41755 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:41755 from jenkins.hfs.4 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:41755 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:41755 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41755 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:41755 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=95 (was 170), ProcessCount=11 (was 11), AvailableMemoryMB=6546 (was 6682) 2024-11-14T09:31:51,844 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=95, ProcessCount=11, AvailableMemoryMB=6546 2024-11-14T09:31:51,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T09:31:51,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.log.dir so I do NOT create it in target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617 2024-11-14T09:31:51,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c08a26b4-a2db-1d80-2bc0-28ffd8f80d88/hadoop.tmp.dir so I do NOT create it in target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617 2024-11-14T09:31:51,844 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/cluster_cd1132b8-1275-00a1-75d5-57c0ddaed4be, deleteOnExit=true 2024-11-14T09:31:51,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T09:31:51,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/test.cache.data in system properties and HBase conf 2024-11-14T09:31:51,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T09:31:51,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/hadoop.log.dir in system properties and HBase conf 2024-11-14T09:31:51,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T09:31:51,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T09:31:51,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T09:31:51,845 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system 
is not a DistributedFileSystem. Skipping on block location reordering 2024-11-14T09:31:51,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:31:51,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:31:51,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T09:31:51,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:31:51,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T09:31:51,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T09:31:51,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:31:51,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:31:51,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T09:31:51,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/nfs.dump.dir in system properties and HBase conf 2024-11-14T09:31:51,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/java.io.tmpdir in system properties and HBase conf 2024-11-14T09:31:51,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:31:51,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T09:31:51,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T09:31:51,858 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:31:51,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:51,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:51,934 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:31:51,938 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:31:51,939 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:31:51,939 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:31:51,939 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:31:51,940 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:31:51,940 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f47469{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:31:51,941 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20734922{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:31:52,053 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58a4fc29{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/java.io.tmpdir/jetty-localhost-40053-hadoop-hdfs-3_4_1-tests_jar-_-any-5297752345744160688/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:31:52,054 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@91ba4af{HTTP/1.1, (http/1.1)}{localhost:40053} 2024-11-14T09:31:52,054 INFO [Time-limited test {}] server.Server(415): Started @186240ms 2024-11-14T09:31:52,066 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:31:52,120 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:31:52,123 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:31:52,123 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:31:52,123 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:31:52,123 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:31:52,124 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e53ab0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:31:52,124 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@bc64617{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:31:52,238 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@721c79ea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/java.io.tmpdir/jetty-localhost-35367-hadoop-hdfs-3_4_1-tests_jar-_-any-15717435443926897636/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:31:52,238 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@571be36f{HTTP/1.1, (http/1.1)}{localhost:35367} 2024-11-14T09:31:52,238 INFO [Time-limited test {}] server.Server(415): Started @186425ms 2024-11-14T09:31:52,240 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:31:52,267 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:31:52,270 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:31:52,270 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:31:52,270 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:31:52,270 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:31:52,271 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6aac63c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:31:52,271 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ad0224a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:31:52,332 WARN [Thread-1646 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/cluster_cd1132b8-1275-00a1-75d5-57c0ddaed4be/data/data1/current/BP-515000432-172.17.0.2-1731576711875/current, will proceed with Du for space computation calculation, 2024-11-14T09:31:52,332 WARN [Thread-1647 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/cluster_cd1132b8-1275-00a1-75d5-57c0ddaed4be/data/data2/current/BP-515000432-172.17.0.2-1731576711875/current, will proceed with Du for space computation calculation, 2024-11-14T09:31:52,348 WARN [Thread-1625 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:31:52,350 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5b0d977b2d2f64ff with lease ID 0x978ace0133e4198d: Processing first storage report for DS-2b4874e5-94bc-406e-bbdc-fc3ce0bfca92 from datanode DatanodeRegistration(127.0.0.1:40303, datanodeUuid=79ed96a9-ad61-40ff-a62a-97bae959eb62, infoPort=36303, infoSecurePort=0, ipcPort=45449, storageInfo=lv=-57;cid=testClusterID;nsid=2003886183;c=1731576711875) 2024-11-14T09:31:52,350 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5b0d977b2d2f64ff with lease ID 0x978ace0133e4198d: from storage DS-2b4874e5-94bc-406e-bbdc-fc3ce0bfca92 node DatanodeRegistration(127.0.0.1:40303, datanodeUuid=79ed96a9-ad61-40ff-a62a-97bae959eb62, infoPort=36303, infoSecurePort=0, ipcPort=45449, storageInfo=lv=-57;cid=testClusterID;nsid=2003886183;c=1731576711875), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:31:52,350 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5b0d977b2d2f64ff with lease ID 0x978ace0133e4198d: Processing first storage report for DS-fa31e196-51ee-457c-b750-39dbee3d984c from datanode DatanodeRegistration(127.0.0.1:40303, datanodeUuid=79ed96a9-ad61-40ff-a62a-97bae959eb62, infoPort=36303, infoSecurePort=0, ipcPort=45449, storageInfo=lv=-57;cid=testClusterID;nsid=2003886183;c=1731576711875) 2024-11-14T09:31:52,350 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5b0d977b2d2f64ff with lease ID 0x978ace0133e4198d: from storage DS-fa31e196-51ee-457c-b750-39dbee3d984c node DatanodeRegistration(127.0.0.1:40303, datanodeUuid=79ed96a9-ad61-40ff-a62a-97bae959eb62, infoPort=36303, infoSecurePort=0, ipcPort=45449, storageInfo=lv=-57;cid=testClusterID;nsid=2003886183;c=1731576711875), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:31:52,389 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@757867e7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/java.io.tmpdir/jetty-localhost-37187-hadoop-hdfs-3_4_1-tests_jar-_-any-13276549519215409460/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:31:52,389 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3648e8bf{HTTP/1.1, (http/1.1)}{localhost:37187} 2024-11-14T09:31:52,389 INFO [Time-limited test {}] server.Server(415): Started @186576ms 2024-11-14T09:31:52,390 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T09:31:52,492 WARN [Thread-1672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/cluster_cd1132b8-1275-00a1-75d5-57c0ddaed4be/data/data3/current/BP-515000432-172.17.0.2-1731576711875/current, will proceed with Du for space computation calculation, 2024-11-14T09:31:52,493 WARN [Thread-1673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/cluster_cd1132b8-1275-00a1-75d5-57c0ddaed4be/data/data4/current/BP-515000432-172.17.0.2-1731576711875/current, will proceed with Du for space computation calculation, 2024-11-14T09:31:52,508 WARN [Thread-1661 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:31:52,510 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9c0480f20884b92f with lease ID 0x978ace0133e4198e: Processing first storage report for DS-a5bee02a-abf8-492d-8688-01dc8aff6164 from datanode DatanodeRegistration(127.0.0.1:41793, datanodeUuid=deca6865-964e-4a6c-86d6-9cc67d9c4030, infoPort=37735, infoSecurePort=0, ipcPort=44975, storageInfo=lv=-57;cid=testClusterID;nsid=2003886183;c=1731576711875) 2024-11-14T09:31:52,510 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9c0480f20884b92f with lease ID 0x978ace0133e4198e: from storage DS-a5bee02a-abf8-492d-8688-01dc8aff6164 node DatanodeRegistration(127.0.0.1:41793, datanodeUuid=deca6865-964e-4a6c-86d6-9cc67d9c4030, infoPort=37735, infoSecurePort=0, ipcPort=44975, storageInfo=lv=-57;cid=testClusterID;nsid=2003886183;c=1731576711875), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:31:52,510 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9c0480f20884b92f with lease ID 0x978ace0133e4198e: Processing first storage report for DS-7dc7386d-63eb-45d3-a2a8-dcae408eb248 from datanode DatanodeRegistration(127.0.0.1:41793, datanodeUuid=deca6865-964e-4a6c-86d6-9cc67d9c4030, infoPort=37735, infoSecurePort=0, ipcPort=44975, storageInfo=lv=-57;cid=testClusterID;nsid=2003886183;c=1731576711875) 2024-11-14T09:31:52,510 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9c0480f20884b92f with lease ID 0x978ace0133e4198e: from storage DS-7dc7386d-63eb-45d3-a2a8-dcae408eb248 node DatanodeRegistration(127.0.0.1:41793, datanodeUuid=deca6865-964e-4a6c-86d6-9cc67d9c4030, infoPort=37735, infoSecurePort=0, ipcPort=44975, storageInfo=lv=-57;cid=testClusterID;nsid=2003886183;c=1731576711875), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:31:52,613 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617 2024-11-14T09:31:52,615 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/cluster_cd1132b8-1275-00a1-75d5-57c0ddaed4be/zookeeper_0, clientPort=58155, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/cluster_cd1132b8-1275-00a1-75d5-57c0ddaed4be/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/cluster_cd1132b8-1275-00a1-75d5-57c0ddaed4be/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T09:31:52,616 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58155 2024-11-14T09:31:52,616 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:31:52,617 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:31:52,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:31:52,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:31:52,627 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217 with version=8 2024-11-14T09:31:52,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/hbase-staging 2024-11-14T09:31:52,629 INFO [Time-limited test {}] client.ConnectionUtils(128): master/83f56b55f2af:0 server-side Connection retries=45 2024-11-14T09:31:52,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:31:52,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:31:52,629 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:31:52,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:31:52,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:31:52,629 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T09:31:52,629 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:31:52,630 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45895 2024-11-14T09:31:52,631 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45895 connecting to ZooKeeper ensemble=127.0.0.1:58155 2024-11-14T09:31:52,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:458950x0, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:31:52,637 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45895-0x10115d1b8360000 connected 2024-11-14T09:31:52,652 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:31:52,653 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:31:52,655 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:31:52,655 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217, hbase.cluster.distributed=false 2024-11-14T09:31:52,657 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:31:52,657 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45895 2024-11-14T09:31:52,657 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45895 2024-11-14T09:31:52,657 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45895 2024-11-14T09:31:52,658 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45895 2024-11-14T09:31:52,658 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45895 2024-11-14T09:31:52,672 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/83f56b55f2af:0 server-side Connection retries=45 2024-11-14T09:31:52,672 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:31:52,672 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:31:52,673 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:31:52,673 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:31:52,673 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:31:52,673 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T09:31:52,673 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:31:52,673 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45617 2024-11-14T09:31:52,675 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45617 connecting to ZooKeeper ensemble=127.0.0.1:58155 2024-11-14T09:31:52,675 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:31:52,676 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:31:52,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:456170x0, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:31:52,680 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:456170x0, quorum=127.0.0.1:58155, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:31:52,681 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45617-0x10115d1b8360001 connected 2024-11-14T09:31:52,681 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T09:31:52,681 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T09:31:52,682 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T09:31:52,683 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:31:52,683 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45617 2024-11-14T09:31:52,683 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45617 2024-11-14T09:31:52,683 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45617 2024-11-14T09:31:52,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45617 2024-11-14T09:31:52,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45617 2024-11-14T09:31:52,695 
DEBUG [M:0;83f56b55f2af:45895 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;83f56b55f2af:45895 2024-11-14T09:31:52,695 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/83f56b55f2af,45895,1731576712628 2024-11-14T09:31:52,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:31:52,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:31:52,697 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/83f56b55f2af,45895,1731576712628 2024-11-14T09:31:52,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:52,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T09:31:52,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:52,700 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T09:31:52,700 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/83f56b55f2af,45895,1731576712628 from backup master directory 2024-11-14T09:31:52,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/83f56b55f2af,45895,1731576712628 2024-11-14T09:31:52,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:31:52,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:31:52,702 WARN [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
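The entries above show the backup-master znode for 83f56b55f2af,45895,1731576712628 being created and then cleared from the backup-master directory as that process takes over as active master (its registration is logged just below), all against the ZooKeeper ensemble at 127.0.0.1:58155. For orientation only, the sketch below shows how a client could reach a cluster through that same quorum using the public HBase client API; the quorum host, client port and base znode are taken from the log, while the class name and everything else is assumed and is not part of the test itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class QuorumConnectSketch {
      public static void main(String[] args) throws Exception {
        // Point the client at the ZooKeeper ensemble the master/regionserver logged above.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // host from the log
        conf.setInt("hbase.zookeeper.property.clientPort", 58155); // port from the log
        conf.set("zookeeper.znode.parent", "/hbase");              // baseZNode from the log

        // ConnectionFactory locates the active master via the /hbase/master znode.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          System.out.println("meta available: " + admin.tableExists(TableName.META_TABLE_NAME));
        }
      }
    }
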
2024-11-14T09:31:52,702 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=83f56b55f2af,45895,1731576712628 2024-11-14T09:31:52,706 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/hbase.id] with ID: 6787f30c-5606-4a7e-87c1-71971f68c15b 2024-11-14T09:31:52,706 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/.tmp/hbase.id 2024-11-14T09:31:52,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:31:52,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:31:52,714 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/.tmp/hbase.id]:[hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/hbase.id] 2024-11-14T09:31:52,725 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:31:52,725 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T09:31:52,727 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-14T09:31:52,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:52,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:52,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:31:52,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:31:52,735 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:31:52,736 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T09:31:52,736 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:31:52,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:31:52,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:31:52,747 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store 2024-11-14T09:31:52,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:31:52,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:31:52,753 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:31:52,753 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:31:52,753 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:31:52,753 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:31:52,753 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:31:52,754 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:31:52,754 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
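The 'master:store' descriptor logged above pairs an in-memory 'info' family (ROW_INDEX_V1 encoding, ROWCOL bloom filter, 8 KB blocks, 3 versions) with 'proc', 'rs' and 'state' families kept at a single version. The fragment below is only a sketch of how a descriptor with similar family settings could be expressed through the public client API; it is not taken from the test code, and the table name used here is hypothetical.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor descriptor = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example", "store"))   // hypothetical table name
            // Mirrors the 'info' family settings from the log: 3 versions, in-memory,
            // ROW_INDEX_V1 encoding, ROWCOL bloom filter, 8 KB block size.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build())
            // The remaining families keep a single version, like 'proc', 'rs' and 'state' above.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .build())
            .build();
        System.out.println(descriptor);
      }
    }
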
2024-11-14T09:31:52,754 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731576712753Disabling compacts and flushes for region at 1731576712753Disabling writes for close at 1731576712754 (+1 ms)Writing region close event to WAL at 1731576712754Closed at 1731576712754 2024-11-14T09:31:52,754 WARN [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/.initializing 2024-11-14T09:31:52,754 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/WALs/83f56b55f2af,45895,1731576712628 2024-11-14T09:31:52,757 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C45895%2C1731576712628, suffix=, logDir=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/WALs/83f56b55f2af,45895,1731576712628, archiveDir=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/oldWALs, maxLogs=10 2024-11-14T09:31:52,757 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C45895%2C1731576712628.1731576712757 2024-11-14T09:31:52,762 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/WALs/83f56b55f2af,45895,1731576712628/83f56b55f2af%2C45895%2C1731576712628.1731576712757 2024-11-14T09:31:52,762 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36303:36303),(127.0.0.1/127.0.0.1:37735:37735)] 2024-11-14T09:31:52,764 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:31:52,765 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:31:52,765 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:52,765 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:52,767 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:52,768 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T09:31:52,768 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:52,768 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:31:52,768 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:52,769 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T09:31:52,769 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:52,770 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:31:52,770 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:52,771 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T09:31:52,771 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:52,771 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:31:52,771 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:52,772 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T09:31:52,772 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:52,772 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:31:52,773 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:52,773 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:52,773 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:52,775 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:52,775 DEBUG [master/83f56b55f2af:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:52,775 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T09:31:52,776 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:31:52,778 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:31:52,778 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=739297, jitterRate=-0.05993558466434479}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T09:31:52,779 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731576712765Initializing all the Stores at 1731576712765Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576712765Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576712766 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576712766Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576712766Cleaning up temporary data from old regions at 1731576712775 (+9 ms)Region opened successfully at 1731576712779 (+4 ms) 2024-11-14T09:31:52,779 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T09:31:52,782 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@be749e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83f56b55f2af/172.17.0.2:0 2024-11-14T09:31:52,783 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T09:31:52,783 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T09:31:52,783 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T09:31:52,783 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T09:31:52,784 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T09:31:52,784 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T09:31:52,784 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T09:31:52,786 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T09:31:52,786 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T09:31:52,788 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T09:31:52,788 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T09:31:52,788 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T09:31:52,791 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T09:31:52,791 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T09:31:52,792 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T09:31:52,794 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T09:31:52,794 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T09:31:52,796 DEBUG 
[master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T09:31:52,797 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T09:31:52,798 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T09:31:52,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:31:52,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:52,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:31:52,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:52,803 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=83f56b55f2af,45895,1731576712628, sessionid=0x10115d1b8360000, setting cluster-up flag (Was=false) 2024-11-14T09:31:52,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:52,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:52,811 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T09:31:52,812 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=83f56b55f2af,45895,1731576712628 2024-11-14T09:31:52,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:52,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:52,820 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T09:31:52,821 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=83f56b55f2af,45895,1731576712628 2024-11-14T09:31:52,822 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T09:31:52,823 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T09:31:52,824 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T09:31:52,824 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T09:31:52,824 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 83f56b55f2af,45895,1731576712628 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T09:31:52,825 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:31:52,825 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:31:52,825 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:31:52,825 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:31:52,825 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/83f56b55f2af:0, corePoolSize=10, maxPoolSize=10 2024-11-14T09:31:52,825 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:52,825 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/83f56b55f2af:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:31:52,825 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/83f56b55f2af:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T09:31:52,829 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731576742829 2024-11-14T09:31:52,829 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T09:31:52,829 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T09:31:52,829 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T09:31:52,829 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T09:31:52,829 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T09:31:52,829 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T09:31:52,829 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:52,829 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T09:31:52,829 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T09:31:52,829 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:31:52,829 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T09:31:52,830 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T09:31:52,830 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T09:31:52,830 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T09:31:52,830 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576712830,5,FailOnTimeoutGroup] 2024-11-14T09:31:52,830 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576712830,5,FailOnTimeoutGroup] 2024-11-14T09:31:52,830 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:52,830 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T09:31:52,830 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:52,830 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:52,830 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:52,831 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T09:31:52,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:31:52,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:31:52,839 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T09:31:52,839 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217 2024-11-14T09:31:52,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:31:52,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:31:52,846 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:31:52,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:31:52,849 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:31:52,849 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:52,849 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:31:52,850 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:31:52,851 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:31:52,851 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:52,851 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:31:52,851 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:31:52,853 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:31:52,853 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:52,853 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:31:52,853 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:31:52,855 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:31:52,855 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:52,855 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:31:52,855 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:31:52,856 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740 2024-11-14T09:31:52,856 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740 2024-11-14T09:31:52,857 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:31:52,857 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:31:52,858 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-14T09:31:52,859 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:31:52,861 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:31:52,861 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=738199, jitterRate=-0.06133176386356354}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:31:52,862 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731576712846Initializing all the Stores at 1731576712847 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576712847Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576712847Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576712847Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576712847Cleaning up temporary data from old regions at 1731576712857 (+10 ms)Region opened successfully at 1731576712862 (+5 ms) 2024-11-14T09:31:52,862 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:31:52,862 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:31:52,862 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:31:52,862 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:31:52,862 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:31:52,862 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:31:52,862 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731576712862Disabling compacts and flushes for region at 1731576712862Disabling writes for close at 1731576712862Writing region 
close event to WAL at 1731576712862Closed at 1731576712862
2024-11-14T09:31:52,864 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-14T09:31:52,864 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-11-14T09:31:52,864 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-11-14T09:31:52,865 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-14T09:31:52,867 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-11-14T09:31:52,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-14T09:31:52,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-14T09:31:52,886 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer(746): ClusterId : 6787f30c-5606-4a7e-87c1-71971f68c15b 2024-11-14T09:31:52,886 DEBUG [RS:0;83f56b55f2af:45617 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T09:31:52,888 DEBUG [RS:0;83f56b55f2af:45617 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T09:31:52,888 DEBUG [RS:0;83f56b55f2af:45617 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T09:31:52,891 DEBUG [RS:0;83f56b55f2af:45617 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T09:31:52,891 DEBUG [RS:0;83f56b55f2af:45617 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29ba35ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83f56b55f2af/172.17.0.2:0 2024-11-14T09:31:52,903 DEBUG [RS:0;83f56b55f2af:45617 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;83f56b55f2af:45617 2024-11-14T09:31:52,903 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T09:31:52,903 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T09:31:52,903 DEBUG [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-14T09:31:52,903 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer(2659): reportForDuty to master=83f56b55f2af,45895,1731576712628 with port=45617, startcode=1731576712672 2024-11-14T09:31:52,904 DEBUG [RS:0;83f56b55f2af:45617 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T09:31:52,906 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52755, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T09:31:52,906 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45895 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 83f56b55f2af,45617,1731576712672 2024-11-14T09:31:52,906 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45895 {}] master.ServerManager(517): Registering regionserver=83f56b55f2af,45617,1731576712672 2024-11-14T09:31:52,908 DEBUG [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217 2024-11-14T09:31:52,908 DEBUG [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34251 2024-11-14T09:31:52,908 DEBUG [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T09:31:52,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:31:52,910 DEBUG [RS:0;83f56b55f2af:45617 {}] zookeeper.ZKUtil(111): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Set watcher on 
existing znode=/hbase/rs/83f56b55f2af,45617,1731576712672 2024-11-14T09:31:52,910 WARN [RS:0;83f56b55f2af:45617 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:31:52,910 INFO [RS:0;83f56b55f2af:45617 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:31:52,910 DEBUG [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/WALs/83f56b55f2af,45617,1731576712672 2024-11-14T09:31:52,910 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [83f56b55f2af,45617,1731576712672] 2024-11-14T09:31:52,913 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T09:31:52,914 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T09:31:52,915 INFO [RS:0;83f56b55f2af:45617 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:31:52,915 INFO [RS:0;83f56b55f2af:45617 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:52,915 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T09:31:52,916 INFO [RS:0;83f56b55f2af:45617 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T09:31:52,916 INFO [RS:0;83f56b55f2af:45617 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-14T09:31:52,916 DEBUG [RS:0;83f56b55f2af:45617 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:52,916 DEBUG [RS:0;83f56b55f2af:45617 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:52,916 DEBUG [RS:0;83f56b55f2af:45617 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:52,916 DEBUG [RS:0;83f56b55f2af:45617 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:52,916 DEBUG [RS:0;83f56b55f2af:45617 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:52,916 DEBUG [RS:0;83f56b55f2af:45617 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/83f56b55f2af:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:31:52,916 DEBUG [RS:0;83f56b55f2af:45617 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:52,916 DEBUG [RS:0;83f56b55f2af:45617 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:52,916 DEBUG [RS:0;83f56b55f2af:45617 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:52,916 DEBUG [RS:0;83f56b55f2af:45617 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:52,916 DEBUG [RS:0;83f56b55f2af:45617 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:52,916 DEBUG [RS:0;83f56b55f2af:45617 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:31:52,916 DEBUG [RS:0;83f56b55f2af:45617 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/83f56b55f2af:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:31:52,916 DEBUG [RS:0;83f56b55f2af:45617 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:31:52,917 INFO [RS:0;83f56b55f2af:45617 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:52,917 INFO [RS:0;83f56b55f2af:45617 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:52,917 INFO [RS:0;83f56b55f2af:45617 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:52,917 INFO [RS:0;83f56b55f2af:45617 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-14T09:31:52,917 INFO [RS:0;83f56b55f2af:45617 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:52,917 INFO [RS:0;83f56b55f2af:45617 {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,45617,1731576712672-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:31:52,931 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T09:31:52,931 INFO [RS:0;83f56b55f2af:45617 {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,45617,1731576712672-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:52,931 INFO [RS:0;83f56b55f2af:45617 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:52,931 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.Replication(171): 83f56b55f2af,45617,1731576712672 started 2024-11-14T09:31:52,945 INFO [RS:0;83f56b55f2af:45617 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:52,945 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer(1482): Serving as 83f56b55f2af,45617,1731576712672, RpcServer on 83f56b55f2af/172.17.0.2:45617, sessionid=0x10115d1b8360001 2024-11-14T09:31:52,945 DEBUG [RS:0;83f56b55f2af:45617 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T09:31:52,945 DEBUG [RS:0;83f56b55f2af:45617 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 83f56b55f2af,45617,1731576712672 2024-11-14T09:31:52,945 DEBUG [RS:0;83f56b55f2af:45617 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83f56b55f2af,45617,1731576712672' 2024-11-14T09:31:52,945 DEBUG [RS:0;83f56b55f2af:45617 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T09:31:52,946 DEBUG [RS:0;83f56b55f2af:45617 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T09:31:52,946 DEBUG [RS:0;83f56b55f2af:45617 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T09:31:52,946 DEBUG [RS:0;83f56b55f2af:45617 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T09:31:52,946 DEBUG [RS:0;83f56b55f2af:45617 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 83f56b55f2af,45617,1731576712672 2024-11-14T09:31:52,946 DEBUG [RS:0;83f56b55f2af:45617 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83f56b55f2af,45617,1731576712672' 2024-11-14T09:31:52,946 DEBUG [RS:0;83f56b55f2af:45617 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T09:31:52,947 DEBUG [RS:0;83f56b55f2af:45617 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T09:31:52,947 DEBUG [RS:0;83f56b55f2af:45617 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T09:31:52,947 INFO [RS:0;83f56b55f2af:45617 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T09:31:52,947 INFO [RS:0;83f56b55f2af:45617 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-14T09:31:53,017 WARN [83f56b55f2af:45895 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T09:31:53,049 INFO [RS:0;83f56b55f2af:45617 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C45617%2C1731576712672, suffix=, logDir=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/WALs/83f56b55f2af,45617,1731576712672, archiveDir=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/oldWALs, maxLogs=32 2024-11-14T09:31:53,050 INFO [RS:0;83f56b55f2af:45617 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C45617%2C1731576712672.1731576713050 2024-11-14T09:31:53,055 INFO [RS:0;83f56b55f2af:45617 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/WALs/83f56b55f2af,45617,1731576712672/83f56b55f2af%2C45617%2C1731576712672.1731576713050 2024-11-14T09:31:53,056 DEBUG [RS:0;83f56b55f2af:45617 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36303:36303),(127.0.0.1/127.0.0.1:37735:37735)] 2024-11-14T09:31:53,267 DEBUG [83f56b55f2af:45895 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T09:31:53,268 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=83f56b55f2af,45617,1731576712672 2024-11-14T09:31:53,269 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 83f56b55f2af,45617,1731576712672, state=OPENING 2024-11-14T09:31:53,271 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T09:31:53,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:53,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:31:53,273 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:31:53,273 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:31:53,273 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:31:53,273 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=83f56b55f2af,45617,1731576712672}] 2024-11-14T09:31:53,426 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T09:31:53,428 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32853, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T09:31:53,432 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T09:31:53,432 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:31:53,434 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C45617%2C1731576712672.meta, suffix=.meta, logDir=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/WALs/83f56b55f2af,45617,1731576712672, archiveDir=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/oldWALs, maxLogs=32 2024-11-14T09:31:53,434 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C45617%2C1731576712672.meta.1731576713434.meta 2024-11-14T09:31:53,439 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/WALs/83f56b55f2af,45617,1731576712672/83f56b55f2af%2C45617%2C1731576712672.meta.1731576713434.meta 2024-11-14T09:31:53,440 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37735:37735),(127.0.0.1/127.0.0.1:36303:36303)] 2024-11-14T09:31:53,441 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:31:53,441 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T09:31:53,441 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T09:31:53,441 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-14T09:31:53,441 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T09:31:53,441 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:31:53,442 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T09:31:53,442 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T09:31:53,443 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:31:53,444 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:31:53,444 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:53,444 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:31:53,444 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:31:53,445 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:31:53,445 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:53,445 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:31:53,445 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:31:53,446 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:31:53,446 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:53,446 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:31:53,447 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:31:53,447 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:31:53,447 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:53,447 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-14T09:31:53,448 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:31:53,448 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740 2024-11-14T09:31:53,449 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740 2024-11-14T09:31:53,450 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:31:53,450 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:31:53,450 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T09:31:53,452 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:31:53,452 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=780608, jitterRate=-0.007406279444694519}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:31:53,452 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T09:31:53,453 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731576713442Writing region info on filesystem at 1731576713442Initializing all the Stores at 1731576713442Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576713442Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576713443 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576713443Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576713443Cleaning up temporary data from old regions at 1731576713450 (+7 ms)Running coprocessor post-open hooks at 1731576713452 (+2 ms)Region opened successfully at 1731576713453 (+1 ms) 2024-11-14T09:31:53,454 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731576713426 2024-11-14T09:31:53,456 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T09:31:53,456 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T09:31:53,457 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=83f56b55f2af,45617,1731576712672 2024-11-14T09:31:53,458 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 83f56b55f2af,45617,1731576712672, state=OPEN 2024-11-14T09:31:53,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:31:53,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:31:53,465 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=83f56b55f2af,45617,1731576712672 2024-11-14T09:31:53,465 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:31:53,465 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:31:53,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T09:31:53,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=83f56b55f2af,45617,1731576712672 in 192 msec 2024-11-14T09:31:53,471 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T09:31:53,471 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 604 msec 2024-11-14T09:31:53,472 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:31:53,472 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T09:31:53,473 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:31:53,473 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=83f56b55f2af,45617,1731576712672, seqNum=-1] 2024-11-14T09:31:53,473 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:31:53,475 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37733, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:31:53,480 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 656 msec 2024-11-14T09:31:53,480 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731576713480, completionTime=-1 2024-11-14T09:31:53,480 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T09:31:53,480 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T09:31:53,482 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T09:31:53,482 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731576773482 2024-11-14T09:31:53,482 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731576833482 2024-11-14T09:31:53,482 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-14T09:31:53,482 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,45895,1731576712628-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:53,482 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,45895,1731576712628-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:53,482 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,45895,1731576712628-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:53,482 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-83f56b55f2af:45895, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T09:31:53,482 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:53,483 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T09:31:53,484 DEBUG [master/83f56b55f2af:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T09:31:53,486 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.784sec 2024-11-14T09:31:53,486 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T09:31:53,486 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T09:31:53,486 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T09:31:53,486 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T09:31:53,486 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T09:31:53,486 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,45895,1731576712628-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:31:53,486 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,45895,1731576712628-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T09:31:53,488 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T09:31:53,488 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T09:31:53,488 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,45895,1731576712628-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T09:31:53,586 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fc5a8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:31:53,586 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 83f56b55f2af,45895,-1 for getting cluster id 2024-11-14T09:31:53,586 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T09:31:53,588 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '6787f30c-5606-4a7e-87c1-71971f68c15b' 2024-11-14T09:31:53,589 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T09:31:53,589 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "6787f30c-5606-4a7e-87c1-71971f68c15b" 2024-11-14T09:31:53,589 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40d4dddf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:31:53,589 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [83f56b55f2af,45895,-1] 2024-11-14T09:31:53,590 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T09:31:53,590 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:31:53,591 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58990, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T09:31:53,592 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2016970a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:31:53,592 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:31:53,593 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=83f56b55f2af,45617,1731576712672, seqNum=-1] 2024-11-14T09:31:53,594 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:31:53,595 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45028, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:31:53,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=83f56b55f2af,45895,1731576712628 2024-11-14T09:31:53,596 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:31:53,599 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T09:31:53,599 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T09:31:53,600 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 83f56b55f2af,45895,1731576712628 2024-11-14T09:31:53,600 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@77ab48ff 2024-11-14T09:31:53,600 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T09:31:53,601 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58994, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T09:31:53,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T09:31:53,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-14T09:31:53,601 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:31:53,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:31:53,604 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T09:31:53,604 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:53,604 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-14T09:31:53,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T09:31:53,605 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T09:31:53,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741835_1011 (size=405) 2024-11-14T09:31:53,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741835_1011 (size=405) 2024-11-14T09:31:53,614 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7854deb8f0ed443994e1fdef7bd0440e, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217 2024-11-14T09:31:53,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741836_1012 (size=88) 2024-11-14T09:31:53,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741836_1012 (size=88) 2024-11-14T09:31:53,620 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:31:53,620 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 7854deb8f0ed443994e1fdef7bd0440e, disabling compactions & flushes 2024-11-14T09:31:53,620 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 2024-11-14T09:31:53,620 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 2024-11-14T09:31:53,620 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. after waiting 0 ms 2024-11-14T09:31:53,620 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 
2024-11-14T09:31:53,620 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 2024-11-14T09:31:53,620 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7854deb8f0ed443994e1fdef7bd0440e: Waiting for close lock at 1731576713620Disabling compacts and flushes for region at 1731576713620Disabling writes for close at 1731576713620Writing region close event to WAL at 1731576713620Closed at 1731576713620 2024-11-14T09:31:53,622 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T09:31:53,622 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731576713622"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731576713622"}]},"ts":"1731576713622"} 2024-11-14T09:31:53,624 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-14T09:31:53,625 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T09:31:53,626 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731576713625"}]},"ts":"1731576713625"} 2024-11-14T09:31:53,627 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-14T09:31:53,628 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=7854deb8f0ed443994e1fdef7bd0440e, ASSIGN}] 2024-11-14T09:31:53,629 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=7854deb8f0ed443994e1fdef7bd0440e, ASSIGN 2024-11-14T09:31:53,630 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=7854deb8f0ed443994e1fdef7bd0440e, ASSIGN; state=OFFLINE, location=83f56b55f2af,45617,1731576712672; forceNewPlan=false, retain=false 2024-11-14T09:31:53,781 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7854deb8f0ed443994e1fdef7bd0440e, regionState=OPENING, regionLocation=83f56b55f2af,45617,1731576712672 2024-11-14T09:31:53,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=7854deb8f0ed443994e1fdef7bd0440e, ASSIGN because future has completed 2024-11-14T09:31:53,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7854deb8f0ed443994e1fdef7bd0440e, server=83f56b55f2af,45617,1731576712672}] 2024-11-14T09:31:53,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:53,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:53,939 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 
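The two WARN stack traces above show the Close-WAL-Writer-0 thread retrying lease recovery on the old WAL files after the test's HDFS client has already been shut down: RecoverLeaseFSUtils reflectively calls DistributedFileSystem.isFileClosed(), and every attempt dies in DFSClient.checkOpen() with "Filesystem closed". A minimal standalone sketch of that failure mode follows; it is not part of the test, the NameNode address is copied from the log and only exists inside that MiniDFSCluster run, and the WAL path is a hypothetical placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FilesystemClosedSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:36133"); // address taken from the log; assumption
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    Path wal = new Path("/user/jenkins/test-data/example.wal"); // hypothetical placeholder path

    // The earlier cluster shutdown closes the shared client while
    // Close-WAL-Writer-0 is still in its retry loop.
    dfs.close();

    // This is the call RecoverLeaseFSUtils invokes via reflection; with the
    // client closed it fails in DFSClient.checkOpen(), producing the
    // "java.io.IOException: Filesystem closed" cause seen in every WARN above.
    dfs.isFileClosed(wal);
  }
}

Each retry logs the same stack trace with a later timestamp, which is why the identical WARN pair (one per old WAL file) reappears roughly once per second throughout the rest of this section.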
2024-11-14T09:31:53,939 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7854deb8f0ed443994e1fdef7bd0440e, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:31:53,939 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 7854deb8f0ed443994e1fdef7bd0440e 2024-11-14T09:31:53,939 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:31:53,939 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7854deb8f0ed443994e1fdef7bd0440e 2024-11-14T09:31:53,939 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7854deb8f0ed443994e1fdef7bd0440e 2024-11-14T09:31:53,940 INFO [StoreOpener-7854deb8f0ed443994e1fdef7bd0440e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7854deb8f0ed443994e1fdef7bd0440e 2024-11-14T09:31:53,942 INFO [StoreOpener-7854deb8f0ed443994e1fdef7bd0440e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7854deb8f0ed443994e1fdef7bd0440e columnFamilyName info 2024-11-14T09:31:53,942 DEBUG [StoreOpener-7854deb8f0ed443994e1fdef7bd0440e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:31:53,942 INFO [StoreOpener-7854deb8f0ed443994e1fdef7bd0440e-1 {}] regionserver.HStore(327): Store=7854deb8f0ed443994e1fdef7bd0440e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:31:53,942 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 7854deb8f0ed443994e1fdef7bd0440e 2024-11-14T09:31:53,943 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e 2024-11-14T09:31:53,943 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e 2024-11-14T09:31:53,943 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7854deb8f0ed443994e1fdef7bd0440e 2024-11-14T09:31:53,943 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7854deb8f0ed443994e1fdef7bd0440e 2024-11-14T09:31:53,944 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7854deb8f0ed443994e1fdef7bd0440e 2024-11-14T09:31:53,946 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:31:53,947 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7854deb8f0ed443994e1fdef7bd0440e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=796005, jitterRate=0.012173548340797424}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T09:31:53,947 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7854deb8f0ed443994e1fdef7bd0440e 2024-11-14T09:31:53,947 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7854deb8f0ed443994e1fdef7bd0440e: Running coprocessor pre-open hook at 1731576713939Writing region info on filesystem at 1731576713939Initializing all the Stores at 1731576713940 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576713940Cleaning up temporary data from old regions at 1731576713943 (+3 ms)Running coprocessor post-open hooks at 1731576713947 (+4 ms)Region opened successfully at 1731576713947 2024-11-14T09:31:53,948 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e., pid=6, masterSystemTime=1731576713935 2024-11-14T09:31:53,950 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 2024-11-14T09:31:53,950 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 2024-11-14T09:31:53,951 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7854deb8f0ed443994e1fdef7bd0440e, regionState=OPEN, openSeqNum=2, regionLocation=83f56b55f2af,45617,1731576712672 2024-11-14T09:31:53,954 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7854deb8f0ed443994e1fdef7bd0440e, server=83f56b55f2af,45617,1731576712672 because future has completed 2024-11-14T09:31:53,957 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T09:31:53,957 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7854deb8f0ed443994e1fdef7bd0440e, server=83f56b55f2af,45617,1731576712672 in 172 msec 2024-11-14T09:31:53,960 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T09:31:53,960 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=7854deb8f0ed443994e1fdef7bd0440e, ASSIGN in 329 msec 2024-11-14T09:31:53,961 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T09:31:53,961 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731576713961"}]},"ts":"1731576713961"} 2024-11-14T09:31:53,963 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-14T09:31:53,964 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T09:31:53,966 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 363 msec 2024-11-14T09:31:54,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:54,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:55,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:55,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:56,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:56,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:57,296 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:31:57,297 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:57,297 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:57,297 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:57,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:57,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:57,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:57,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:57,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:57,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:57,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:57,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:57,312 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:57,315 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:57,315 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:57,316 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:57,318 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:31:57,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:57,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:58,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:58,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:58,913 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T09:31:58,914 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-14T09:31:59,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:31:59,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:00,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:00,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:01,793 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T09:32:01,793 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T09:32:01,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:32:01,794 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T09:32:01,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T09:32:01,794 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-14T09:32:01,795 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:32:01,795 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-14T09:32:01,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:01,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:02,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:02,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:03,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T09:32:03,673 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T09:32:03,673 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-14T09:32:03,676 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:32:03,676 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 
2024-11-14T09:32:03,678 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e., hostname=83f56b55f2af,45617,1731576712672, seqNum=2] 2024-11-14T09:32:03,685 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:32:03,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:32:03,691 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-14T09:32:03,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-14T09:32:03,693 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-14T09:32:03,694 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-14T09:32:03,854 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45617 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-14T09:32:03,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 
2024-11-14T09:32:03,855 INFO [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 7854deb8f0ed443994e1fdef7bd0440e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T09:32:03,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/a45cdd41895f43cdb268b362c0020efd is 1080, key is row0001/info:/1731576723679/Put/seqid=0 2024-11-14T09:32:03,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741837_1013 (size=6033) 2024-11-14T09:32:03,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741837_1013 (size=6033) 2024-11-14T09:32:03,876 INFO [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/a45cdd41895f43cdb268b362c0020efd 2024-11-14T09:32:03,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/a45cdd41895f43cdb268b362c0020efd as hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/a45cdd41895f43cdb268b362c0020efd 2024-11-14T09:32:03,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:03,887 INFO [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/a45cdd41895f43cdb268b362c0020efd, entries=1, sequenceid=5, filesize=5.9 K 2024-11-14T09:32:03,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:03,888 INFO [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7854deb8f0ed443994e1fdef7bd0440e in 33ms, sequenceid=5, compaction requested=false 2024-11-14T09:32:03,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 7854deb8f0ed443994e1fdef7bd0440e: 2024-11-14T09:32:03,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 2024-11-14T09:32:03,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-14T09:32:03,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-14T09:32:03,896 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-14T09:32:03,897 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 200 msec 2024-11-14T09:32:03,899 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 210 msec 2024-11-14T09:32:04,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:04,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:05,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:05,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:06,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:06,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:07,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:07,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:08,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:08,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:09,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:09,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:10,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:10,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:11,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:11,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:12,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:12,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:13,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-14T09:32:13,773 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T09:32:13,776 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:32:13,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:32:13,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-14T09:32:13,778 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-14T09:32:13,779 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-14T09:32:13,779 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, 
hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-14T09:32:13,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:13,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:13,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45617 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-14T09:32:13,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 2024-11-14T09:32:13,933 INFO [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 7854deb8f0ed443994e1fdef7bd0440e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T09:32:13,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/4fa434c8f7a74b9f99e47de95304d37f is 1080, key is row0002/info:/1731576733774/Put/seqid=0 2024-11-14T09:32:13,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741838_1014 (size=6033) 2024-11-14T09:32:13,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741838_1014 (size=6033) 2024-11-14T09:32:13,943 INFO [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/4fa434c8f7a74b9f99e47de95304d37f 2024-11-14T09:32:13,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/4fa434c8f7a74b9f99e47de95304d37f as 
hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/4fa434c8f7a74b9f99e47de95304d37f 2024-11-14T09:32:13,954 INFO [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/4fa434c8f7a74b9f99e47de95304d37f, entries=1, sequenceid=9, filesize=5.9 K 2024-11-14T09:32:13,955 INFO [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7854deb8f0ed443994e1fdef7bd0440e in 22ms, sequenceid=9, compaction requested=false 2024-11-14T09:32:13,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 7854deb8f0ed443994e1fdef7bd0440e: 2024-11-14T09:32:13,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 2024-11-14T09:32:13,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-14T09:32:13,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-14T09:32:13,959 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-14T09:32:13,959 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec 2024-11-14T09:32:13,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec 2024-11-14T09:32:14,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:14,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:15,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:15,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:16,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:16,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:17,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:17,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 after 68045ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:32:17,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:17,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta after 68031ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:32:18,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:18,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:19,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:19,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:20,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:20,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:21,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:21,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:22,612 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T09:32:22,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:22,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:23,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-14T09:32:23,813 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T09:32:23,832 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C45617%2C1731576712672.1731576743832 2024-11-14T09:32:23,838 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:23,838 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:23,838 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:23,838 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:23,838 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:23,838 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/WALs/83f56b55f2af,45617,1731576712672/83f56b55f2af%2C45617%2C1731576712672.1731576713050 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/WALs/83f56b55f2af,45617,1731576712672/83f56b55f2af%2C45617%2C1731576712672.1731576743832 2024-11-14T09:32:23,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741833_1009 (size=5546) 2024-11-14T09:32:23,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741833_1009 (size=5546) 2024-11-14T09:32:23,844 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with 
pipeline: [(127.0.0.1/127.0.0.1:37735:37735),(127.0.0.1/127.0.0.1:36303:36303)] 2024-11-14T09:32:23,845 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:32:23,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:32:23,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-14T09:32:23,847 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-14T09:32:23,848 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-14T09:32:23,848 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-14T09:32:23,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:23,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:24,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45617 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-14T09:32:24,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 
2024-11-14T09:32:24,002 INFO [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 7854deb8f0ed443994e1fdef7bd0440e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T09:32:24,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/3e7a3690a7f9463eb3e1233e3a02e7c6 is 1080, key is row0003/info:/1731576743814/Put/seqid=0 2024-11-14T09:32:24,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741840_1016 (size=6033) 2024-11-14T09:32:24,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741840_1016 (size=6033) 2024-11-14T09:32:24,013 INFO [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/3e7a3690a7f9463eb3e1233e3a02e7c6 2024-11-14T09:32:24,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/3e7a3690a7f9463eb3e1233e3a02e7c6 as hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/3e7a3690a7f9463eb3e1233e3a02e7c6 2024-11-14T09:32:24,025 INFO [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/3e7a3690a7f9463eb3e1233e3a02e7c6, entries=1, sequenceid=13, filesize=5.9 K 2024-11-14T09:32:24,026 INFO [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7854deb8f0ed443994e1fdef7bd0440e in 24ms, sequenceid=13, compaction requested=true 2024-11-14T09:32:24,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 7854deb8f0ed443994e1fdef7bd0440e: 2024-11-14T09:32:24,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 
2024-11-14T09:32:24,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-14T09:32:24,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-14T09:32:24,030 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-14T09:32:24,030 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 180 msec 2024-11-14T09:32:24,033 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 186 msec 2024-11-14T09:32:24,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:24,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:25,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:25,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:26,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:26,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:27,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:27,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:28,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:28,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:29,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:29,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:30,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:30,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:31,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:31,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:32,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:32,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:33,519 INFO [master/83f56b55f2af:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T09:32:33,519 INFO [master/83f56b55f2af:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-14T09:32:33,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:33,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:33,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-14T09:32:33,913 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T09:32:33,913 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:32:33,914 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:32:33,914 DEBUG [Time-limited test {}] regionserver.HStore(1541): 7854deb8f0ed443994e1fdef7bd0440e/info is initiating minor compaction (all files) 2024-11-14T09:32:33,914 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:32:33,915 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:33,915 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 7854deb8f0ed443994e1fdef7bd0440e/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 2024-11-14T09:32:33,915 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/a45cdd41895f43cdb268b362c0020efd, hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/4fa434c8f7a74b9f99e47de95304d37f, hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/3e7a3690a7f9463eb3e1233e3a02e7c6] into tmpdir=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp, totalSize=17.7 K 2024-11-14T09:32:33,915 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting a45cdd41895f43cdb268b362c0020efd, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731576723679 2024-11-14T09:32:33,916 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 4fa434c8f7a74b9f99e47de95304d37f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731576733774 2024-11-14T09:32:33,916 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 3e7a3690a7f9463eb3e1233e3a02e7c6, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731576743814 2024-11-14T09:32:33,926 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 7854deb8f0ed443994e1fdef7bd0440e#info#compaction#47 average throughput is unlimited, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:32:33,927 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/7c13885120dd40b68beaa49a65d20736 is 1080, key is row0001/info:/1731576723679/Put/seqid=0 2024-11-14T09:32:33,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741841_1017 (size=8296) 2024-11-14T09:32:33,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741841_1017 (size=8296) 2024-11-14T09:32:33,937 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/7c13885120dd40b68beaa49a65d20736 as hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/7c13885120dd40b68beaa49a65d20736 2024-11-14T09:32:33,942 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7854deb8f0ed443994e1fdef7bd0440e/info of 7854deb8f0ed443994e1fdef7bd0440e into 7c13885120dd40b68beaa49a65d20736(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:32:33,942 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 7854deb8f0ed443994e1fdef7bd0440e: 2024-11-14T09:32:33,945 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C45617%2C1731576712672.1731576753945 2024-11-14T09:32:33,950 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:33,950 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:33,950 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:33,950 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:33,950 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:33,950 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/WALs/83f56b55f2af,45617,1731576712672/83f56b55f2af%2C45617%2C1731576712672.1731576743832 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/WALs/83f56b55f2af,45617,1731576712672/83f56b55f2af%2C45617%2C1731576712672.1731576753945 2024-11-14T09:32:33,951 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37735:37735),(127.0.0.1/127.0.0.1:36303:36303)] 2024-11-14T09:32:33,951 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/WALs/83f56b55f2af,45617,1731576712672/83f56b55f2af%2C45617%2C1731576712672.1731576743832 is not closed yet, will try archiving it next time 2024-11-14T09:32:33,951 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/WALs/83f56b55f2af,45617,1731576712672/83f56b55f2af%2C45617%2C1731576712672.1731576713050 to hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/oldWALs/83f56b55f2af%2C45617%2C1731576712672.1731576713050 2024-11-14T09:32:33,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741839_1015 (size=2520) 2024-11-14T09:32:33,952 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:32:33,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741839_1015 (size=2520) 2024-11-14T09:32:33,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:32:33,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-14T09:32:33,955 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-14T09:32:33,955 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-14T09:32:33,955 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-14T09:32:34,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45617 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-14T09:32:34,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 
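Editor's note: the pid=13 / pid=14 FLUSH procedure recorded above is the master-side handling of a client flush request against the test table ("Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling"). As a minimal, hedged sketch of the equivalent client-side call with the HBase Java API (the class name, connection setup, and configuration are illustrative assumptions, not taken from this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Assumed client configuration; the mini-cluster test harness wires this up itself.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush the table; the master stores a FlushTableProcedure
      // and fans out FlushRegionProcedure subprocedures to the region servers,
      // which is what the pid=13 / pid=14 entries above show.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}

The client then polls the master until the procedure finishes, which is what the "Checking to see if procedure is done pid=13" and "Operation: FLUSH ... completed" entries reflect.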
2024-11-14T09:32:34,109 INFO [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 7854deb8f0ed443994e1fdef7bd0440e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T09:32:34,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/d4bd282ae2f8495a80c957e8387fd7b7 is 1080, key is row0000/info:/1731576753943/Put/seqid=0 2024-11-14T09:32:34,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741843_1019 (size=6033) 2024-11-14T09:32:34,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741843_1019 (size=6033) 2024-11-14T09:32:34,118 INFO [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/d4bd282ae2f8495a80c957e8387fd7b7 2024-11-14T09:32:34,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/d4bd282ae2f8495a80c957e8387fd7b7 as hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/d4bd282ae2f8495a80c957e8387fd7b7 2024-11-14T09:32:34,128 INFO [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/d4bd282ae2f8495a80c957e8387fd7b7, entries=1, sequenceid=18, filesize=5.9 K 2024-11-14T09:32:34,130 INFO [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7854deb8f0ed443994e1fdef7bd0440e in 20ms, sequenceid=18, compaction requested=false 2024-11-14T09:32:34,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 7854deb8f0ed443994e1fdef7bd0440e: 2024-11-14T09:32:34,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 
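Editor's note: the flush above first writes the new store file under the region's .tmp directory (.tmp/info/d4bd282ae2f8495a80c957e8387fd7b7) and only then commits it into the info family directory, so readers never observe a half-written HFile. Below is a minimal sketch of that write-then-rename commit pattern using the Hadoop FileSystem API; the class name and paths are illustrative assumptions, and a real flush goes through HRegionFileSystem and HFile writers rather than raw streams.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Illustrative paths only, not the exact region paths from this log.
    Path tmpFile = new Path("hdfs://localhost:34251/example/region/.tmp/info/newfile");
    Path finalFile = new Path("hdfs://localhost:34251/example/region/info/newfile");
    FileSystem fs = tmpFile.getFileSystem(conf);

    // 1. Write the complete file under .tmp first.
    try (FSDataOutputStream out = fs.create(tmpFile)) {
      out.writeBytes("flushed cells would be written here");
    }

    // 2. Publish it with a rename into the store directory; this is the step the
    //    "Committing ... as ..." HRegionFileSystem log line corresponds to.
    fs.mkdirs(finalFile.getParent());
    if (!fs.rename(tmpFile, finalFile)) {
      throw new java.io.IOException("commit rename failed");
    }
  }
}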
2024-11-14T09:32:34,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-14T09:32:34,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-14T09:32:34,134 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-14T09:32:34,134 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 176 msec 2024-11-14T09:32:34,136 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 182 msec 2024-11-14T09:32:34,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:34,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:35,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:35,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:36,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:36,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:37,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:37,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:38,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:38,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:38,939 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7854deb8f0ed443994e1fdef7bd0440e, had cached 0 bytes from a total of 14329 2024-11-14T09:32:39,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:39,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:40,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:40,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:41,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:41,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:42,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:42,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:43,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:43,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:43,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45895 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-14T09:32:43,983 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T09:32:43,985 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C45617%2C1731576712672.1731576763985 2024-11-14T09:32:43,991 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:43,991 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:43,991 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:43,991 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:43,991 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:43,991 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/WALs/83f56b55f2af,45617,1731576712672/83f56b55f2af%2C45617%2C1731576712672.1731576753945 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/WALs/83f56b55f2af,45617,1731576712672/83f56b55f2af%2C45617%2C1731576712672.1731576763985 2024-11-14T09:32:43,992 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37735:37735),(127.0.0.1/127.0.0.1:36303:36303)] 2024-11-14T09:32:43,992 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/WALs/83f56b55f2af,45617,1731576712672/83f56b55f2af%2C45617%2C1731576712672.1731576753945 is not closed yet, will try archiving it next time 2024-11-14T09:32:43,992 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/WALs/83f56b55f2af,45617,1731576712672/83f56b55f2af%2C45617%2C1731576712672.1731576743832 to hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/oldWALs/83f56b55f2af%2C45617%2C1731576712672.1731576743832 2024-11-14T09:32:43,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T09:32:43,992 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T09:32:43,993 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:32:43,993 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:32:43,993 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:32:43,993 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-14T09:32:43,993 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T09:32:43,993 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1078266043, stopped=false 2024-11-14T09:32:43,993 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=83f56b55f2af,45895,1731576712628 2024-11-14T09:32:43,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741842_1018 (size=2026) 2024-11-14T09:32:43,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741842_1018 (size=2026) 2024-11-14T09:32:43,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:32:43,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:32:43,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:32:43,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:32:43,997 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:32:43,997 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T09:32:43,997 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:32:43,997 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:32:43,997 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '83f56b55f2af,45617,1731576712672' ***** 2024-11-14T09:32:43,997 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T09:32:43,997 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:32:43,997 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T09:32:43,998 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T09:32:43,998 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:32:43,998 INFO [RS:0;83f56b55f2af:45617 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T09:32:43,998 INFO [RS:0;83f56b55f2af:45617 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T09:32:43,998 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer(3091): Received CLOSE for 7854deb8f0ed443994e1fdef7bd0440e 2024-11-14T09:32:43,998 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer(959): stopping server 83f56b55f2af,45617,1731576712672 2024-11-14T09:32:43,998 INFO [RS:0;83f56b55f2af:45617 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:32:43,998 INFO [RS:0;83f56b55f2af:45617 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;83f56b55f2af:45617. 2024-11-14T09:32:43,998 DEBUG [RS:0;83f56b55f2af:45617 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:32:43,998 DEBUG [RS:0;83f56b55f2af:45617 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:32:43,998 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7854deb8f0ed443994e1fdef7bd0440e, disabling compactions & flushes 2024-11-14T09:32:43,998 INFO 
[RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 2024-11-14T09:32:43,998 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T09:32:43,998 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T09:32:43,998 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 2024-11-14T09:32:43,998 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T09:32:43,998 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. after waiting 0 ms 2024-11-14T09:32:43,998 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 2024-11-14T09:32:43,998 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T09:32:43,998 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 7854deb8f0ed443994e1fdef7bd0440e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T09:32:43,999 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T09:32:43,999 DEBUG [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 7854deb8f0ed443994e1fdef7bd0440e=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e.} 2024-11-14T09:32:43,999 DEBUG [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7854deb8f0ed443994e1fdef7bd0440e 2024-11-14T09:32:43,999 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:32:43,999 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:32:43,999 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:32:43,999 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:32:43,999 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:32:43,999 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-14T09:32:44,003 DEBUG 
[RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/74d327045b534b36bf1338f7ca3d7d25 is 1080, key is row0001/info:/1731576763984/Put/seqid=0 2024-11-14T09:32:44,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741845_1021 (size=6033) 2024-11-14T09:32:44,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741845_1021 (size=6033) 2024-11-14T09:32:44,010 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/74d327045b534b36bf1338f7ca3d7d25 2024-11-14T09:32:44,015 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/.tmp/info/c607ea0104d941829e127cfefbd0e8b5 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e./info:regioninfo/1731576713951/Put/seqid=0 2024-11-14T09:32:44,017 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/.tmp/info/74d327045b534b36bf1338f7ca3d7d25 as hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/74d327045b534b36bf1338f7ca3d7d25 2024-11-14T09:32:44,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741846_1022 (size=7308) 2024-11-14T09:32:44,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741846_1022 (size=7308) 2024-11-14T09:32:44,020 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/.tmp/info/c607ea0104d941829e127cfefbd0e8b5 2024-11-14T09:32:44,022 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/74d327045b534b36bf1338f7ca3d7d25, entries=1, sequenceid=22, filesize=5.9 K 2024-11-14T09:32:44,023 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7854deb8f0ed443994e1fdef7bd0440e in 25ms, sequenceid=22, compaction requested=true 2024-11-14T09:32:44,030 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/a45cdd41895f43cdb268b362c0020efd, hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/4fa434c8f7a74b9f99e47de95304d37f, hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/3e7a3690a7f9463eb3e1233e3a02e7c6] to archive 2024-11-14T09:32:44,031 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T09:32:44,032 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/a45cdd41895f43cdb268b362c0020efd to hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/a45cdd41895f43cdb268b362c0020efd 2024-11-14T09:32:44,034 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/4fa434c8f7a74b9f99e47de95304d37f to hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/4fa434c8f7a74b9f99e47de95304d37f 2024-11-14T09:32:44,035 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/3e7a3690a7f9463eb3e1233e3a02e7c6 to hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/info/3e7a3690a7f9463eb3e1233e3a02e7c6 2024-11-14T09:32:44,035 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=83f56b55f2af:45895 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-14T09:32:44,036 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [a45cdd41895f43cdb268b362c0020efd=6033, 4fa434c8f7a74b9f99e47de95304d37f=6033, 3e7a3690a7f9463eb3e1233e3a02e7c6=6033] 2024-11-14T09:32:44,040 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/7854deb8f0ed443994e1fdef7bd0440e/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-14T09:32:44,040 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 2024-11-14T09:32:44,040 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7854deb8f0ed443994e1fdef7bd0440e: Waiting for close lock at 1731576763998Running coprocessor pre-close hooks at 1731576763998Disabling compacts and flushes for region at 1731576763998Disabling writes for close at 1731576763998Obtaining lock to block concurrent updates at 1731576763998Preparing flush snapshotting stores in 7854deb8f0ed443994e1fdef7bd0440e at 1731576763998Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731576763999 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. at 1731576763999Flushing 7854deb8f0ed443994e1fdef7bd0440e/info: creating writer at 1731576763999Flushing 7854deb8f0ed443994e1fdef7bd0440e/info: appending metadata at 1731576764002 (+3 ms)Flushing 7854deb8f0ed443994e1fdef7bd0440e/info: closing flushed file at 1731576764002Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a47e147: reopening flushed file at 1731576764016 (+14 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 7854deb8f0ed443994e1fdef7bd0440e in 25ms, sequenceid=22, compaction requested=true at 1731576764023 (+7 ms)Writing region close event to WAL at 1731576764036 (+13 ms)Running coprocessor post-close hooks at 1731576764040 (+4 ms)Closed at 1731576764040 2024-11-14T09:32:44,040 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731576713601.7854deb8f0ed443994e1fdef7bd0440e. 
2024-11-14T09:32:44,043 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/.tmp/ns/abefc0f38ccb4514a5f192d36dfa2489 is 43, key is default/ns:d/1731576713475/Put/seqid=0 2024-11-14T09:32:44,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741847_1023 (size=5153) 2024-11-14T09:32:44,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741847_1023 (size=5153) 2024-11-14T09:32:44,048 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/.tmp/ns/abefc0f38ccb4514a5f192d36dfa2489 2024-11-14T09:32:44,066 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/.tmp/table/5700f3aada554074bd473d9b55773945 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731576713961/Put/seqid=0 2024-11-14T09:32:44,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741848_1024 (size=5508) 2024-11-14T09:32:44,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741848_1024 (size=5508) 2024-11-14T09:32:44,071 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/.tmp/table/5700f3aada554074bd473d9b55773945 2024-11-14T09:32:44,077 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/.tmp/info/c607ea0104d941829e127cfefbd0e8b5 as hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/info/c607ea0104d941829e127cfefbd0e8b5 2024-11-14T09:32:44,082 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/info/c607ea0104d941829e127cfefbd0e8b5, entries=10, sequenceid=11, filesize=7.1 K 2024-11-14T09:32:44,083 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/.tmp/ns/abefc0f38ccb4514a5f192d36dfa2489 as hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/ns/abefc0f38ccb4514a5f192d36dfa2489 2024-11-14T09:32:44,088 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/ns/abefc0f38ccb4514a5f192d36dfa2489, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T09:32:44,089 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/.tmp/table/5700f3aada554074bd473d9b55773945 as hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/table/5700f3aada554074bd473d9b55773945 2024-11-14T09:32:44,093 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/table/5700f3aada554074bd473d9b55773945, entries=2, sequenceid=11, filesize=5.4 K 2024-11-14T09:32:44,094 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 95ms, sequenceid=11, compaction requested=false 2024-11-14T09:32:44,100 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T09:32:44,100 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:32:44,100 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:32:44,101 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731576763999Running coprocessor pre-close hooks at 1731576763999Disabling compacts and flushes for region at 1731576763999Disabling writes for close at 1731576763999Obtaining lock to block concurrent updates at 1731576763999Preparing flush snapshotting stores in 1588230740 at 1731576763999Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731576763999Flushing stores of hbase:meta,,1.1588230740 at 1731576764000 (+1 ms)Flushing 1588230740/info: creating writer at 1731576764000Flushing 1588230740/info: appending metadata at 1731576764015 (+15 ms)Flushing 1588230740/info: closing flushed file at 1731576764015Flushing 1588230740/ns: creating writer at 1731576764025 (+10 ms)Flushing 1588230740/ns: appending metadata at 1731576764042 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731576764042Flushing 1588230740/table: creating writer at 1731576764052 (+10 ms)Flushing 1588230740/table: appending metadata at 1731576764066 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731576764066Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@eea1b29: reopening flushed file at 1731576764076 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@628cd2cd: reopening flushed file at 1731576764082 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f00ab1: reopening flushed file at 1731576764088 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 95ms, sequenceid=11, compaction requested=false at 1731576764094 (+6 ms)Writing region close event to WAL at 1731576764097 (+3 ms)Running coprocessor post-close hooks at 1731576764100 (+3 ms)Closed at 1731576764100 2024-11-14T09:32:44,101 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T09:32:44,199 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer(976): stopping server 83f56b55f2af,45617,1731576712672; all regions closed. 2024-11-14T09:32:44,199 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:44,199 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:44,199 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:44,200 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:44,200 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:44,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741834_1010 (size=3306) 2024-11-14T09:32:44,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741834_1010 (size=3306) 2024-11-14T09:32:44,204 DEBUG [RS:0;83f56b55f2af:45617 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/oldWALs 2024-11-14T09:32:44,204 INFO [RS:0;83f56b55f2af:45617 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 83f56b55f2af%2C45617%2C1731576712672.meta:.meta(num 1731576713434) 2024-11-14T09:32:44,204 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:44,204 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:44,204 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:44,204 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:44,205 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:44,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741844_1020 (size=1252) 2024-11-14T09:32:44,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741844_1020 (size=1252) 2024-11-14T09:32:44,210 DEBUG [RS:0;83f56b55f2af:45617 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/oldWALs 2024-11-14T09:32:44,210 INFO [RS:0;83f56b55f2af:45617 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 83f56b55f2af%2C45617%2C1731576712672:(num 1731576763985) 2024-11-14T09:32:44,210 DEBUG [RS:0;83f56b55f2af:45617 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:32:44,210 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:32:44,210 INFO [RS:0;83f56b55f2af:45617 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:32:44,210 INFO [RS:0;83f56b55f2af:45617 {}] hbase.ChoreService(370): Chore service for: regionserver/83f56b55f2af:0 had 
[ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T09:32:44,210 INFO [RS:0;83f56b55f2af:45617 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:32:44,210 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:32:44,210 INFO [RS:0;83f56b55f2af:45617 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45617 2024-11-14T09:32:44,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:32:44,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/83f56b55f2af,45617,1731576712672 2024-11-14T09:32:44,212 INFO [RS:0;83f56b55f2af:45617 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:32:44,214 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [83f56b55f2af,45617,1731576712672] 2024-11-14T09:32:44,216 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/83f56b55f2af,45617,1731576712672 already deleted, retry=false 2024-11-14T09:32:44,216 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 83f56b55f2af,45617,1731576712672 expired; onlineServers=0 2024-11-14T09:32:44,216 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '83f56b55f2af,45895,1731576712628' ***** 2024-11-14T09:32:44,216 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T09:32:44,217 INFO [M:0;83f56b55f2af:45895 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:32:44,217 INFO [M:0;83f56b55f2af:45895 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:32:44,217 DEBUG [M:0;83f56b55f2af:45895 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T09:32:44,217 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T09:32:44,217 DEBUG [M:0;83f56b55f2af:45895 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T09:32:44,217 DEBUG [master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576712830 {}] cleaner.HFileCleaner(306): Exit Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576712830,5,FailOnTimeoutGroup] 2024-11-14T09:32:44,217 DEBUG [master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576712830 {}] cleaner.HFileCleaner(306): Exit Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576712830,5,FailOnTimeoutGroup] 2024-11-14T09:32:44,217 INFO [M:0;83f56b55f2af:45895 {}] hbase.ChoreService(370): Chore service for: master/83f56b55f2af:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T09:32:44,217 INFO [M:0;83f56b55f2af:45895 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:32:44,217 DEBUG [M:0;83f56b55f2af:45895 {}] master.HMaster(1795): Stopping service threads 2024-11-14T09:32:44,217 INFO [M:0;83f56b55f2af:45895 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T09:32:44,217 INFO [M:0;83f56b55f2af:45895 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:32:44,217 INFO [M:0;83f56b55f2af:45895 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T09:32:44,218 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T09:32:44,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T09:32:44,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:32:44,218 DEBUG [M:0;83f56b55f2af:45895 {}] zookeeper.ZKUtil(347): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T09:32:44,219 WARN [M:0;83f56b55f2af:45895 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T09:32:44,219 INFO [M:0;83f56b55f2af:45895 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/.lastflushedseqids 2024-11-14T09:32:44,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741849_1025 (size=130) 2024-11-14T09:32:44,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741849_1025 (size=130) 2024-11-14T09:32:44,224 INFO [M:0;83f56b55f2af:45895 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T09:32:44,224 INFO [M:0;83f56b55f2af:45895 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T09:32:44,224 DEBUG [M:0;83f56b55f2af:45895 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:32:44,224 INFO [M:0;83f56b55f2af:45895 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:32:44,224 DEBUG [M:0;83f56b55f2af:45895 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:32:44,224 DEBUG [M:0;83f56b55f2af:45895 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:32:44,225 DEBUG [M:0;83f56b55f2af:45895 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:32:44,225 INFO [M:0;83f56b55f2af:45895 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.59 KB heapSize=55 KB 2024-11-14T09:32:44,240 DEBUG [M:0;83f56b55f2af:45895 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/51f99a50962846138dc70162cc4ab3a9 is 82, key is hbase:meta,,1/info:regioninfo/1731576713457/Put/seqid=0 2024-11-14T09:32:44,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741850_1026 (size=5672) 2024-11-14T09:32:44,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741850_1026 (size=5672) 2024-11-14T09:32:44,246 INFO [M:0;83f56b55f2af:45895 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/51f99a50962846138dc70162cc4ab3a9 2024-11-14T09:32:44,265 DEBUG [M:0;83f56b55f2af:45895 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/36cd0fe47e9c4b129c50bc1601451c6e is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731576713965/Put/seqid=0 2024-11-14T09:32:44,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741851_1027 (size=7823) 2024-11-14T09:32:44,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741851_1027 (size=7823) 2024-11-14T09:32:44,270 INFO [M:0;83f56b55f2af:45895 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.99 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/36cd0fe47e9c4b129c50bc1601451c6e 2024-11-14T09:32:44,274 INFO [M:0;83f56b55f2af:45895 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 36cd0fe47e9c4b129c50bc1601451c6e 2024-11-14T09:32:44,288 DEBUG [M:0;83f56b55f2af:45895 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0c85fd746df1413f970c00b60e1cf06f is 69, key is 83f56b55f2af,45617,1731576712672/rs:state/1731576712906/Put/seqid=0 2024-11-14T09:32:44,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741852_1028 (size=5156) 2024-11-14T09:32:44,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741852_1028 (size=5156) 2024-11-14T09:32:44,292 INFO [M:0;83f56b55f2af:45895 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0c85fd746df1413f970c00b60e1cf06f 2024-11-14T09:32:44,310 DEBUG [M:0;83f56b55f2af:45895 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1a5f9d1e1e7c4123877d539f80299220 is 52, key is load_balancer_on/state:d/1731576713598/Put/seqid=0 2024-11-14T09:32:44,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741853_1029 (size=5056) 2024-11-14T09:32:44,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:32:44,314 INFO [RS:0;83f56b55f2af:45617 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:32:44,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45617-0x10115d1b8360001, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:32:44,314 INFO [RS:0;83f56b55f2af:45617 {}] regionserver.HRegionServer(1031): Exiting; stopping=83f56b55f2af,45617,1731576712672; zookeeper connection closed. 
2024-11-14T09:32:44,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741853_1029 (size=5056) 2024-11-14T09:32:44,314 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7d646dcf {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7d646dcf 2024-11-14T09:32:44,314 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T09:32:44,315 INFO [M:0;83f56b55f2af:45895 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1a5f9d1e1e7c4123877d539f80299220 2024-11-14T09:32:44,319 DEBUG [M:0;83f56b55f2af:45895 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/51f99a50962846138dc70162cc4ab3a9 as hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/51f99a50962846138dc70162cc4ab3a9 2024-11-14T09:32:44,324 INFO [M:0;83f56b55f2af:45895 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/51f99a50962846138dc70162cc4ab3a9, entries=8, sequenceid=121, filesize=5.5 K 2024-11-14T09:32:44,324 DEBUG [M:0;83f56b55f2af:45895 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/36cd0fe47e9c4b129c50bc1601451c6e as hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/36cd0fe47e9c4b129c50bc1601451c6e 2024-11-14T09:32:44,328 INFO [M:0;83f56b55f2af:45895 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 36cd0fe47e9c4b129c50bc1601451c6e 2024-11-14T09:32:44,329 INFO [M:0;83f56b55f2af:45895 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/36cd0fe47e9c4b129c50bc1601451c6e, entries=14, sequenceid=121, filesize=7.6 K 2024-11-14T09:32:44,329 DEBUG [M:0;83f56b55f2af:45895 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0c85fd746df1413f970c00b60e1cf06f as hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0c85fd746df1413f970c00b60e1cf06f 2024-11-14T09:32:44,334 INFO [M:0;83f56b55f2af:45895 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0c85fd746df1413f970c00b60e1cf06f, entries=1, sequenceid=121, filesize=5.0 K 2024-11-14T09:32:44,335 DEBUG 
[M:0;83f56b55f2af:45895 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1a5f9d1e1e7c4123877d539f80299220 as hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1a5f9d1e1e7c4123877d539f80299220 2024-11-14T09:32:44,340 INFO [M:0;83f56b55f2af:45895 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34251/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1a5f9d1e1e7c4123877d539f80299220, entries=1, sequenceid=121, filesize=4.9 K 2024-11-14T09:32:44,341 INFO [M:0;83f56b55f2af:45895 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=121, compaction requested=false 2024-11-14T09:32:44,342 INFO [M:0;83f56b55f2af:45895 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:32:44,342 DEBUG [M:0;83f56b55f2af:45895 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731576764224Disabling compacts and flushes for region at 1731576764224Disabling writes for close at 1731576764224Obtaining lock to block concurrent updates at 1731576764225 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731576764225Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44638, getHeapSize=56256, getOffHeapSize=0, getCellsCount=140 at 1731576764225Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731576764226 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731576764226Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731576764240 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731576764240Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731576764250 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731576764264 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731576764264Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731576764274 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731576764287 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731576764287Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731576764296 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731576764310 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731576764310Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43b9b0b4: reopening flushed file at 1731576764319 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1de4ada6: reopening flushed file at 1731576764324 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b50fe4b: reopening flushed file at 1731576764329 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4de22117: reopening flushed file at 1731576764335 (+6 ms)Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 116ms, sequenceid=121, compaction requested=false at 1731576764341 (+6 ms)Writing region close event to WAL at 1731576764342 (+1 ms)Closed at 1731576764342 2024-11-14T09:32:44,343 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:44,343 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:44,343 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:44,343 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:44,343 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:32:44,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41793 is added to blk_1073741830_1006 (size=53035) 2024-11-14T09:32:44,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40303 is added to blk_1073741830_1006 (size=53035) 2024-11-14T09:32:44,346 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:32:44,346 INFO [M:0;83f56b55f2af:45895 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
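The flush captured in the entries above follows the usual per-family pattern: each memstore snapshot is written to a temporary hfile under the store's .tmp/<family>/ directory, and the later HRegionFileSystem "Committing ... as ..." entries move that file into the family directory itself, which on HDFS amounts to a rename. Below is a minimal, illustrative sketch of that rename step only, reusing the info-family paths from this log purely as example values; the real commit happens inside HBase's HRegionFileSystem during the flush, not in standalone code like this.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitStoreFileSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative only: the store directory and hfile name are copied from the log above,
    // and the namenode address (localhost:34251) is the test minicluster's, so this is a
    // sketch of the pattern rather than something meant to run against a real cluster.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34251"), new Configuration());
    Path store = new Path("/user/jenkins/test-data/a845d026-c9dc-d1cd-2855-d9e70c427217/"
        + "MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682");
    Path tmp = new Path(store, ".tmp/info/51f99a50962846138dc70162cc4ab3a9");
    Path committed = new Path(store, "info/51f99a50962846138dc70162cc4ab3a9");
    // The "Committing ... as ..." log entries correspond to renaming the flushed temporary
    // hfile into the store's family directory, after which it is opened and added to the store.
    boolean ok = fs.rename(tmp, committed);
    System.out.println("committed=" + ok);
  }
}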
2024-11-14T09:32:44,346 INFO [M:0;83f56b55f2af:45895 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45895 2024-11-14T09:32:44,346 INFO [M:0;83f56b55f2af:45895 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:32:44,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:32:44,448 INFO [M:0;83f56b55f2af:45895 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:32:44,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45895-0x10115d1b8360000, quorum=127.0.0.1:58155, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:32:44,450 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@757867e7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:32:44,450 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3648e8bf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:32:44,451 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:32:44,451 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ad0224a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:32:44,451 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6aac63c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/hadoop.log.dir/,STOPPED} 2024-11-14T09:32:44,453 WARN [BP-515000432-172.17.0.2-1731576711875 heartbeating to localhost/127.0.0.1:34251 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:32:44,453 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:32:44,453 WARN [BP-515000432-172.17.0.2-1731576711875 heartbeating to localhost/127.0.0.1:34251 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-515000432-172.17.0.2-1731576711875 (Datanode Uuid deca6865-964e-4a6c-86d6-9cc67d9c4030) service to localhost/127.0.0.1:34251 2024-11-14T09:32:44,453 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:32:44,453 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/cluster_cd1132b8-1275-00a1-75d5-57c0ddaed4be/data/data3/current/BP-515000432-172.17.0.2-1731576711875 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:32:44,453 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/cluster_cd1132b8-1275-00a1-75d5-57c0ddaed4be/data/data4/current/BP-515000432-172.17.0.2-1731576711875 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:32:44,454 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:32:44,455 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@721c79ea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:32:44,456 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@571be36f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:32:44,456 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:32:44,456 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@bc64617{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:32:44,456 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e53ab0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/hadoop.log.dir/,STOPPED} 2024-11-14T09:32:44,457 WARN [BP-515000432-172.17.0.2-1731576711875 heartbeating to localhost/127.0.0.1:34251 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:32:44,457 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
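The interrupted heartbeat and command-processor threads and the "Ending block pool service" warnings above are normal teardown noise from the embedded HDFS datanodes. Those datanodes are managed through Hadoop's MiniDFSCluster (from the hadoop-hdfs-3.4.1-tests.jar referenced throughout this log); as a hedged illustration of where they come from, a bare-bones start/stop of such a cluster looks roughly like the sketch below. The class and method names are the standard Hadoop test API, but this is not the exact code path the HBase test utility uses internally.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Two datanodes, mirroring the numDataNodes=2 topology used by this test.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      System.out.println("NameNode at " + fs.getUri());
    } finally {
      // Shutting down interrupts the datanode heartbeat and command-processor threads,
      // which is what produces WARN/ERROR lines like the ones above.
      cluster.shutdown();
    }
  }
}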
2024-11-14T09:32:44,457 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:32:44,457 WARN [BP-515000432-172.17.0.2-1731576711875 heartbeating to localhost/127.0.0.1:34251 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-515000432-172.17.0.2-1731576711875 (Datanode Uuid 79ed96a9-ad61-40ff-a62a-97bae959eb62) service to localhost/127.0.0.1:34251 2024-11-14T09:32:44,458 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/cluster_cd1132b8-1275-00a1-75d5-57c0ddaed4be/data/data1/current/BP-515000432-172.17.0.2-1731576711875 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:32:44,458 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/cluster_cd1132b8-1275-00a1-75d5-57c0ddaed4be/data/data2/current/BP-515000432-172.17.0.2-1731576711875 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:32:44,458 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:32:44,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58a4fc29{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:32:44,465 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@91ba4af{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:32:44,465 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:32:44,465 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20734922{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:32:44,465 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f47469{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/hadoop.log.dir/,STOPPED} 2024-11-14T09:32:44,471 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T09:32:44,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T09:32:44,496 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 179) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:34251 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:34251 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:34251 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:34251 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34251 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34251 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34251 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34251 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34251 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/83f56b55f2af:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=485 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=41 (was 95), ProcessCount=11 (was 11), AvailableMemoryMB=6503 (was 6546) 2024-11-14T09:32:44,503 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=485, MaxFileDescriptor=1048576, SystemLoadAverage=41, ProcessCount=11, AvailableMemoryMB=6503 2024-11-14T09:32:44,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T09:32:44,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/hadoop.log.dir so I do NOT create it in target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6 2024-11-14T09:32:44,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a1ae3c20-b192-70a8-fe6f-42ca04c94617/hadoop.tmp.dir so I do NOT create it in target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6 2024-11-14T09:32:44,504 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/cluster_b2af9291-0691-383f-0a31-3dfb60af2861, deleteOnExit=true 2024-11-14T09:32:44,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T09:32:44,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/test.cache.data in system properties and HBase conf 2024-11-14T09:32:44,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T09:32:44,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/hadoop.log.dir in system properties and HBase conf 2024-11-14T09:32:44,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T09:32:44,504 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T09:32:44,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T09:32:44,504 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-14T09:32:44,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:32:44,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:32:44,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T09:32:44,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:32:44,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T09:32:44,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T09:32:44,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:32:44,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:32:44,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 
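The StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...} printed at the start of this test is the option object the harness uses when bringing the minicluster up, and the surrounding entries show it pointing hadoop/HBase system properties at per-test directories before starting DFS. For orientation, a minimal sketch of requesting the same topology with the HBase 3.x test utilities named in this log (HBaseTestingUtil, StartMiniClusterOption) follows; builder method names may differ slightly between HBase versions, so treat it as an approximation rather than the harness's own code.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Same topology as the logged option: 1 master, 1 regionserver, 2 datanodes, 1 ZK server.
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .createRootDir(false)
        .createWALDir(false)
        .build();
    util.startMiniCluster(option);   // starts MiniDFS, MiniZK and the HBase cluster
    try {
      // ... run test logic against util.getConnection() / util.getAdmin() ...
    } finally {
      util.shutdownMiniCluster();    // the "Minicluster is down" entry earlier comes from this path
    }
  }
}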
2024-11-14T09:32:44,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/nfs.dump.dir in system properties and HBase conf 2024-11-14T09:32:44,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/java.io.tmpdir in system properties and HBase conf 2024-11-14T09:32:44,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:32:44,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T09:32:44,505 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T09:32:44,518 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:32:44,575 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:32:44,580 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:32:44,581 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:32:44,581 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:32:44,581 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:32:44,582 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:32:44,582 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@162d5848{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:32:44,582 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2877d055{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:32:44,696 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@57cd8a13{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/java.io.tmpdir/jetty-localhost-35843-hadoop-hdfs-3_4_1-tests_jar-_-any-7431600203583399825/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:32:44,697 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5eb11345{HTTP/1.1, (http/1.1)}{localhost:35843} 2024-11-14T09:32:44,697 INFO [Time-limited test {}] server.Server(415): Started @238883ms 2024-11-14T09:32:44,709 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:32:44,806 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:32:44,810 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:32:44,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:32:44,811 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:32:44,811 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:32:44,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29d3ea34{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:32:44,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@324063a7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:32:44,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:44,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:44,919 INFO [regionserver/83f56b55f2af:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:32:44,936 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@44ec820f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/java.io.tmpdir/jetty-localhost-46287-hadoop-hdfs-3_4_1-tests_jar-_-any-11169811569743597617/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:32:44,937 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13dbd1c9{HTTP/1.1, (http/1.1)}{localhost:46287} 2024-11-14T09:32:44,937 INFO [Time-limited test {}] server.Server(415): Started @239123ms 2024-11-14T09:32:44,938 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:32:44,970 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:32:44,972 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:32:44,973 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:32:44,973 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:32:44,973 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:32:44,973 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d35d031{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:32:44,973 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58157e93{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:32:45,041 WARN [Thread-1964 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/cluster_b2af9291-0691-383f-0a31-3dfb60af2861/data/data1/current/BP-1299273729-172.17.0.2-1731576764523/current, will proceed with Du for space computation calculation, 2024-11-14T09:32:45,041 WARN [Thread-1965 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/cluster_b2af9291-0691-383f-0a31-3dfb60af2861/data/data2/current/BP-1299273729-172.17.0.2-1731576764523/current, will proceed with Du for space computation calculation, 2024-11-14T09:32:45,063 WARN [Thread-1943 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:32:45,065 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7fb1f91e1e5318f0 with lease ID 0x13683a54c265d7b2: Processing first storage report for DS-c692f1a8-f1af-46d7-b374-dc9018ca737c from datanode DatanodeRegistration(127.0.0.1:40913, datanodeUuid=fa0404ab-2878-4db9-98f9-58922f43aaea, infoPort=34449, infoSecurePort=0, ipcPort=38723, storageInfo=lv=-57;cid=testClusterID;nsid=1008644099;c=1731576764523) 2024-11-14T09:32:45,066 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7fb1f91e1e5318f0 with lease ID 0x13683a54c265d7b2: from storage DS-c692f1a8-f1af-46d7-b374-dc9018ca737c node DatanodeRegistration(127.0.0.1:40913, datanodeUuid=fa0404ab-2878-4db9-98f9-58922f43aaea, infoPort=34449, infoSecurePort=0, ipcPort=38723, storageInfo=lv=-57;cid=testClusterID;nsid=1008644099;c=1731576764523), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T09:32:45,066 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7fb1f91e1e5318f0 with lease ID 0x13683a54c265d7b2: Processing first storage report for DS-07980155-72b0-4627-8f3d-2588e824d002 from datanode DatanodeRegistration(127.0.0.1:40913, datanodeUuid=fa0404ab-2878-4db9-98f9-58922f43aaea, infoPort=34449, infoSecurePort=0, ipcPort=38723, storageInfo=lv=-57;cid=testClusterID;nsid=1008644099;c=1731576764523) 2024-11-14T09:32:45,066 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7fb1f91e1e5318f0 with lease ID 0x13683a54c265d7b2: from storage DS-07980155-72b0-4627-8f3d-2588e824d002 node DatanodeRegistration(127.0.0.1:40913, datanodeUuid=fa0404ab-2878-4db9-98f9-58922f43aaea, infoPort=34449, infoSecurePort=0, ipcPort=38723, storageInfo=lv=-57;cid=testClusterID;nsid=1008644099;c=1731576764523), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:32:45,105 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@771d3856{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/java.io.tmpdir/jetty-localhost-35005-hadoop-hdfs-3_4_1-tests_jar-_-any-12648744111676654465/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:32:45,106 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1fdd1c8b{HTTP/1.1, (http/1.1)}{localhost:35005} 2024-11-14T09:32:45,106 INFO [Time-limited test {}] server.Server(415): Started @239292ms 2024-11-14T09:32:45,107 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T09:32:45,202 WARN [Thread-1990 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/cluster_b2af9291-0691-383f-0a31-3dfb60af2861/data/data3/current/BP-1299273729-172.17.0.2-1731576764523/current, will proceed with Du for space computation calculation, 2024-11-14T09:32:45,202 WARN [Thread-1991 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/cluster_b2af9291-0691-383f-0a31-3dfb60af2861/data/data4/current/BP-1299273729-172.17.0.2-1731576764523/current, will proceed with Du for space computation calculation, 2024-11-14T09:32:45,217 WARN [Thread-1979 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:32:45,219 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x814adc5865dce405 with lease ID 0x13683a54c265d7b3: Processing first storage report for DS-2bb4b2f9-ef83-4c9f-bb97-754d5646c038 from datanode DatanodeRegistration(127.0.0.1:39945, datanodeUuid=43daac12-c975-4a54-8ae8-e39dbc73a6f7, infoPort=35097, infoSecurePort=0, ipcPort=37521, storageInfo=lv=-57;cid=testClusterID;nsid=1008644099;c=1731576764523) 2024-11-14T09:32:45,219 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x814adc5865dce405 with lease ID 0x13683a54c265d7b3: from storage DS-2bb4b2f9-ef83-4c9f-bb97-754d5646c038 node DatanodeRegistration(127.0.0.1:39945, datanodeUuid=43daac12-c975-4a54-8ae8-e39dbc73a6f7, infoPort=35097, infoSecurePort=0, ipcPort=37521, storageInfo=lv=-57;cid=testClusterID;nsid=1008644099;c=1731576764523), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:32:45,219 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x814adc5865dce405 with lease ID 0x13683a54c265d7b3: Processing first storage report for DS-4a982b3d-509d-49b1-b9c0-1764a7e012ec from datanode DatanodeRegistration(127.0.0.1:39945, datanodeUuid=43daac12-c975-4a54-8ae8-e39dbc73a6f7, infoPort=35097, infoSecurePort=0, ipcPort=37521, storageInfo=lv=-57;cid=testClusterID;nsid=1008644099;c=1731576764523) 2024-11-14T09:32:45,219 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x814adc5865dce405 with lease ID 0x13683a54c265d7b3: from storage DS-4a982b3d-509d-49b1-b9c0-1764a7e012ec node DatanodeRegistration(127.0.0.1:39945, datanodeUuid=43daac12-c975-4a54-8ae8-e39dbc73a6f7, infoPort=35097, infoSecurePort=0, ipcPort=37521, storageInfo=lv=-57;cid=testClusterID;nsid=1008644099;c=1731576764523), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:32:45,228 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6 2024-11-14T09:32:45,231 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/cluster_b2af9291-0691-383f-0a31-3dfb60af2861/zookeeper_0, clientPort=52182, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/cluster_b2af9291-0691-383f-0a31-3dfb60af2861/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/cluster_b2af9291-0691-383f-0a31-3dfb60af2861/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T09:32:45,233 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52182 2024-11-14T09:32:45,233 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:32:45,234 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:32:45,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:32:45,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:32:45,242 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de with version=8 2024-11-14T09:32:45,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/hbase-staging 2024-11-14T09:32:45,244 INFO [Time-limited test {}] client.ConnectionUtils(128): master/83f56b55f2af:0 server-side Connection retries=45 2024-11-14T09:32:45,244 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:32:45,244 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:32:45,244 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:32:45,244 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:32:45,244 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:32:45,244 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T09:32:45,244 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:32:45,245 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44845 2024-11-14T09:32:45,246 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44845 connecting to ZooKeeper ensemble=127.0.0.1:52182 2024-11-14T09:32:45,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:448450x0, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:32:45,258 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44845-0x10115d285be0000 connected 2024-11-14T09:32:45,271 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:32:45,273 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:32:45,274 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:32:45,274 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de, hbase.cluster.distributed=false 2024-11-14T09:32:45,276 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:32:45,276 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44845 2024-11-14T09:32:45,276 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44845 2024-11-14T09:32:45,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44845 2024-11-14T09:32:45,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44845 2024-11-14T09:32:45,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44845 2024-11-14T09:32:45,292 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/83f56b55f2af:0 server-side Connection retries=45 2024-11-14T09:32:45,292 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:32:45,292 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:32:45,292 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:32:45,292 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:32:45,292 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:32:45,292 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T09:32:45,292 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:32:45,292 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37631 2024-11-14T09:32:45,294 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37631 connecting to ZooKeeper ensemble=127.0.0.1:52182 2024-11-14T09:32:45,294 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:32:45,295 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:32:45,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:376310x0, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:32:45,300 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:376310x0, quorum=127.0.0.1:52182, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:32:45,300 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37631-0x10115d285be0001 connected 2024-11-14T09:32:45,300 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T09:32:45,300 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T09:32:45,301 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T09:32:45,302 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:32:45,302 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37631 2024-11-14T09:32:45,302 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37631 2024-11-14T09:32:45,302 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37631 2024-11-14T09:32:45,303 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37631 2024-11-14T09:32:45,303 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37631 2024-11-14T09:32:45,314 
DEBUG [M:0;83f56b55f2af:44845 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;83f56b55f2af:44845 2024-11-14T09:32:45,314 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/83f56b55f2af,44845,1731576765244 2024-11-14T09:32:45,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:32:45,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:32:45,316 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/83f56b55f2af,44845,1731576765244 2024-11-14T09:32:45,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T09:32:45,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:32:45,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:32:45,318 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T09:32:45,318 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/83f56b55f2af,44845,1731576765244 from backup master directory 2024-11-14T09:32:45,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/83f56b55f2af,44845,1731576765244 2024-11-14T09:32:45,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:32:45,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:32:45,321 WARN [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-14T09:32:45,321 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=83f56b55f2af,44845,1731576765244 2024-11-14T09:32:45,325 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/hbase.id] with ID: 62b4e73f-5fc9-4a0b-8f00-96c73c495b94 2024-11-14T09:32:45,325 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/.tmp/hbase.id 2024-11-14T09:32:45,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:32:45,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:32:45,330 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/.tmp/hbase.id]:[hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/hbase.id] 2024-11-14T09:32:45,339 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:32:45,340 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T09:32:45,341 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-14T09:32:45,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:32:45,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:32:45,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:32:45,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:32:45,350 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:32:45,351 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T09:32:45,351 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:32:45,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:32:45,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:32:45,358 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store 2024-11-14T09:32:45,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:32:45,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:32:45,363 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:32:45,363 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:32:45,363 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:32:45,363 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:32:45,363 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:32:45,363 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:32:45,363 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T09:32:45,363 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731576765363Disabling compacts and flushes for region at 1731576765363Disabling writes for close at 1731576765363Writing region close event to WAL at 1731576765363Closed at 1731576765363 2024-11-14T09:32:45,364 WARN [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/.initializing 2024-11-14T09:32:45,364 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/WALs/83f56b55f2af,44845,1731576765244 2024-11-14T09:32:45,366 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C44845%2C1731576765244, suffix=, logDir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/WALs/83f56b55f2af,44845,1731576765244, archiveDir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/oldWALs, maxLogs=10 2024-11-14T09:32:45,367 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C44845%2C1731576765244.1731576765366 2024-11-14T09:32:45,370 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/WALs/83f56b55f2af,44845,1731576765244/83f56b55f2af%2C44845%2C1731576765244.1731576765366 2024-11-14T09:32:45,371 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35097:35097),(127.0.0.1/127.0.0.1:34449:34449)] 2024-11-14T09:32:45,372 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:32:45,372 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:32:45,372 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:32:45,372 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:32:45,373 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:32:45,374 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T09:32:45,374 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:32:45,375 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:32:45,375 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:32:45,376 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T09:32:45,376 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:32:45,376 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:32:45,376 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:32:45,377 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T09:32:45,377 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:32:45,378 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:32:45,378 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:32:45,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T09:32:45,379 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:32:45,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:32:45,379 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:32:45,380 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:32:45,380 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:32:45,381 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:32:45,381 DEBUG [master/83f56b55f2af:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:32:45,382 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T09:32:45,383 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:32:45,384 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:32:45,385 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=763587, jitterRate=-0.029050037264823914}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T09:32:45,385 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731576765372Initializing all the Stores at 1731576765373 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576765373Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576765373Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576765373Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576765373Cleaning up temporary data from old regions at 1731576765381 (+8 ms)Region opened successfully at 1731576765385 (+4 ms) 2024-11-14T09:32:45,385 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T09:32:45,388 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2831a43e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83f56b55f2af/172.17.0.2:0 2024-11-14T09:32:45,389 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T09:32:45,389 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T09:32:45,389 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T09:32:45,389 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T09:32:45,389 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T09:32:45,390 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T09:32:45,390 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T09:32:45,391 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T09:32:45,392 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T09:32:45,393 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T09:32:45,393 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T09:32:45,394 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T09:32:45,395 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T09:32:45,396 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T09:32:45,396 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T09:32:45,398 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T09:32:45,399 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T09:32:45,400 DEBUG 
[master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T09:32:45,401 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T09:32:45,403 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T09:32:45,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:32:45,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:32:45,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:32:45,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:32:45,405 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=83f56b55f2af,44845,1731576765244, sessionid=0x10115d285be0000, setting cluster-up flag (Was=false) 2024-11-14T09:32:45,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:32:45,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:32:45,413 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T09:32:45,414 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=83f56b55f2af,44845,1731576765244 2024-11-14T09:32:45,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:32:45,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:32:45,423 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T09:32:45,424 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=83f56b55f2af,44845,1731576765244 2024-11-14T09:32:45,425 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T09:32:45,427 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T09:32:45,427 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T09:32:45,427 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T09:32:45,427 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 83f56b55f2af,44845,1731576765244 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T09:32:45,428 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:32:45,428 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:32:45,428 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:32:45,428 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:32:45,428 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/83f56b55f2af:0, corePoolSize=10, maxPoolSize=10 2024-11-14T09:32:45,428 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:32:45,429 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/83f56b55f2af:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:32:45,429 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/83f56b55f2af:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T09:32:45,429 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731576795429 2024-11-14T09:32:45,429 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T09:32:45,430 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T09:32:45,430 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T09:32:45,430 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T09:32:45,430 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T09:32:45,430 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T09:32:45,430 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:45,430 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:32:45,430 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T09:32:45,430 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T09:32:45,430 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T09:32:45,430 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T09:32:45,431 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T09:32:45,431 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T09:32:45,431 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576765431,5,FailOnTimeoutGroup] 2024-11-14T09:32:45,431 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:32:45,431 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576765431,5,FailOnTimeoutGroup] 2024-11-14T09:32:45,431 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-14T09:32:45,431 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T09:32:45,431 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:45,431 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:45,431 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T09:32:45,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:32:45,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:32:45,442 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T09:32:45,442 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de 2024-11-14T09:32:45,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:32:45,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:32:45,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:32:45,448 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:32:45,449 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:32:45,449 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:32:45,450 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:32:45,450 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 
1588230740 2024-11-14T09:32:45,451 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:32:45,451 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:32:45,451 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:32:45,451 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:32:45,452 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:32:45,452 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:32:45,452 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:32:45,453 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:32:45,454 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:32:45,454 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:32:45,454 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:32:45,454 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:32:45,454 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740 2024-11-14T09:32:45,455 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740 2024-11-14T09:32:45,456 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:32:45,456 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:32:45,456 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-14T09:32:45,457 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:32:45,458 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:32:45,459 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=723465, jitterRate=-0.08006668090820312}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:32:45,459 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731576765447Initializing all the Stores at 1731576765448 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576765448Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576765448Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576765448Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576765448Cleaning up temporary data from old regions at 1731576765456 (+8 ms)Region opened successfully at 1731576765459 (+3 ms) 2024-11-14T09:32:45,459 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:32:45,459 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:32:45,459 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:32:45,459 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:32:45,459 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:32:45,460 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:32:45,460 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731576765459Disabling compacts and flushes for region at 1731576765459Disabling writes for close at 1731576765459Writing region close 
event to WAL at 1731576765460 (+1 ms)Closed at 1731576765460 2024-11-14T09:32:45,461 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:32:45,461 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T09:32:45,461 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T09:32:45,462 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:32:45,463 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T09:32:45,505 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(746): ClusterId : 62b4e73f-5fc9-4a0b-8f00-96c73c495b94 2024-11-14T09:32:45,505 DEBUG [RS:0;83f56b55f2af:37631 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T09:32:45,507 DEBUG [RS:0;83f56b55f2af:37631 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T09:32:45,507 DEBUG [RS:0;83f56b55f2af:37631 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T09:32:45,509 DEBUG [RS:0;83f56b55f2af:37631 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T09:32:45,509 DEBUG [RS:0;83f56b55f2af:37631 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24391b0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83f56b55f2af/172.17.0.2:0 2024-11-14T09:32:45,521 DEBUG [RS:0;83f56b55f2af:37631 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;83f56b55f2af:37631 2024-11-14T09:32:45,521 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T09:32:45,521 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T09:32:45,521 DEBUG [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-14T09:32:45,521 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(2659): reportForDuty to master=83f56b55f2af,44845,1731576765244 with port=37631, startcode=1731576765291 2024-11-14T09:32:45,522 DEBUG [RS:0;83f56b55f2af:37631 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T09:32:45,523 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44591, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T09:32:45,524 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44845 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 83f56b55f2af,37631,1731576765291 2024-11-14T09:32:45,524 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44845 {}] master.ServerManager(517): Registering regionserver=83f56b55f2af,37631,1731576765291 2024-11-14T09:32:45,525 DEBUG [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de 2024-11-14T09:32:45,525 DEBUG [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45921 2024-11-14T09:32:45,525 DEBUG [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T09:32:45,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:32:45,528 DEBUG [RS:0;83f56b55f2af:37631 {}] zookeeper.ZKUtil(111): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/83f56b55f2af,37631,1731576765291 2024-11-14T09:32:45,528 WARN [RS:0;83f56b55f2af:37631 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:32:45,528 INFO [RS:0;83f56b55f2af:37631 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:32:45,528 DEBUG [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/WALs/83f56b55f2af,37631,1731576765291 2024-11-14T09:32:45,528 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [83f56b55f2af,37631,1731576765291] 2024-11-14T09:32:45,531 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T09:32:45,533 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T09:32:45,533 INFO [RS:0;83f56b55f2af:37631 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:32:45,533 INFO [RS:0;83f56b55f2af:37631 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-14T09:32:45,533 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T09:32:45,534 INFO [RS:0;83f56b55f2af:37631 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T09:32:45,534 INFO [RS:0;83f56b55f2af:37631 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:45,534 DEBUG [RS:0;83f56b55f2af:37631 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:32:45,534 DEBUG [RS:0;83f56b55f2af:37631 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:32:45,534 DEBUG [RS:0;83f56b55f2af:37631 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:32:45,534 DEBUG [RS:0;83f56b55f2af:37631 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:32:45,534 DEBUG [RS:0;83f56b55f2af:37631 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:32:45,534 DEBUG [RS:0;83f56b55f2af:37631 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/83f56b55f2af:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:32:45,534 DEBUG [RS:0;83f56b55f2af:37631 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:32:45,534 DEBUG [RS:0;83f56b55f2af:37631 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:32:45,534 DEBUG [RS:0;83f56b55f2af:37631 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:32:45,534 DEBUG [RS:0;83f56b55f2af:37631 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:32:45,534 DEBUG [RS:0;83f56b55f2af:37631 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:32:45,534 DEBUG [RS:0;83f56b55f2af:37631 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:32:45,534 DEBUG [RS:0;83f56b55f2af:37631 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/83f56b55f2af:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:32:45,534 DEBUG [RS:0;83f56b55f2af:37631 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:32:45,535 INFO [RS:0;83f56b55f2af:37631 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-14T09:32:45,535 INFO [RS:0;83f56b55f2af:37631 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:45,535 INFO [RS:0;83f56b55f2af:37631 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:45,535 INFO [RS:0;83f56b55f2af:37631 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:45,535 INFO [RS:0;83f56b55f2af:37631 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:45,535 INFO [RS:0;83f56b55f2af:37631 {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,37631,1731576765291-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:32:45,549 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T09:32:45,549 INFO [RS:0;83f56b55f2af:37631 {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,37631,1731576765291-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:45,549 INFO [RS:0;83f56b55f2af:37631 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:45,549 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.Replication(171): 83f56b55f2af,37631,1731576765291 started 2024-11-14T09:32:45,562 INFO [RS:0;83f56b55f2af:37631 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:45,562 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(1482): Serving as 83f56b55f2af,37631,1731576765291, RpcServer on 83f56b55f2af/172.17.0.2:37631, sessionid=0x10115d285be0001 2024-11-14T09:32:45,563 DEBUG [RS:0;83f56b55f2af:37631 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T09:32:45,563 DEBUG [RS:0;83f56b55f2af:37631 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 83f56b55f2af,37631,1731576765291 2024-11-14T09:32:45,563 DEBUG [RS:0;83f56b55f2af:37631 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83f56b55f2af,37631,1731576765291' 2024-11-14T09:32:45,563 DEBUG [RS:0;83f56b55f2af:37631 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T09:32:45,563 DEBUG [RS:0;83f56b55f2af:37631 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T09:32:45,563 DEBUG [RS:0;83f56b55f2af:37631 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T09:32:45,563 DEBUG [RS:0;83f56b55f2af:37631 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T09:32:45,563 DEBUG [RS:0;83f56b55f2af:37631 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 83f56b55f2af,37631,1731576765291 2024-11-14T09:32:45,563 DEBUG [RS:0;83f56b55f2af:37631 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83f56b55f2af,37631,1731576765291' 2024-11-14T09:32:45,563 DEBUG [RS:0;83f56b55f2af:37631 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T09:32:45,564 DEBUG 
[RS:0;83f56b55f2af:37631 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T09:32:45,564 DEBUG [RS:0;83f56b55f2af:37631 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T09:32:45,564 INFO [RS:0;83f56b55f2af:37631 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T09:32:45,564 INFO [RS:0;83f56b55f2af:37631 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T09:32:45,613 WARN [83f56b55f2af:44845 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T09:32:45,666 INFO [RS:0;83f56b55f2af:37631 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C37631%2C1731576765291, suffix=, logDir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/WALs/83f56b55f2af,37631,1731576765291, archiveDir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/oldWALs, maxLogs=32 2024-11-14T09:32:45,666 INFO [RS:0;83f56b55f2af:37631 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C37631%2C1731576765291.1731576765666 2024-11-14T09:32:45,671 INFO [RS:0;83f56b55f2af:37631 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/WALs/83f56b55f2af,37631,1731576765291/83f56b55f2af%2C37631%2C1731576765291.1731576765666 2024-11-14T09:32:45,672 DEBUG [RS:0;83f56b55f2af:37631 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34449:34449),(127.0.0.1/127.0.0.1:35097:35097)] 2024-11-14T09:32:45,863 DEBUG [83f56b55f2af:44845 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T09:32:45,864 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=83f56b55f2af,37631,1731576765291 2024-11-14T09:32:45,865 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 83f56b55f2af,37631,1731576765291, state=OPENING 2024-11-14T09:32:45,867 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T09:32:45,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:32:45,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:32:45,869 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:32:45,869 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:32:45,869 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:32:45,869 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=83f56b55f2af,37631,1731576765291}] 2024-11-14T09:32:45,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:45,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:46,021 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T09:32:46,023 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45061, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T09:32:46,026 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T09:32:46,026 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:32:46,028 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C37631%2C1731576765291.meta, suffix=.meta, logDir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/WALs/83f56b55f2af,37631,1731576765291, archiveDir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/oldWALs, maxLogs=32 2024-11-14T09:32:46,028 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C37631%2C1731576765291.meta.1731576766028.meta 2024-11-14T09:32:46,033 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/WALs/83f56b55f2af,37631,1731576765291/83f56b55f2af%2C37631%2C1731576765291.meta.1731576766028.meta 2024-11-14T09:32:46,040 DEBUG 
[RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35097:35097),(127.0.0.1/127.0.0.1:34449:34449)] 2024-11-14T09:32:46,044 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:32:46,045 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T09:32:46,045 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T09:32:46,045 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-14T09:32:46,045 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T09:32:46,045 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:32:46,045 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T09:32:46,045 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T09:32:46,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:32:46,047 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:32:46,047 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:32:46,047 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:32:46,047 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:32:46,048 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:32:46,048 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:32:46,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:32:46,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:32:46,049 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:32:46,049 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:32:46,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:32:46,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:32:46,050 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:32:46,050 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:32:46,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:32:46,050 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:32:46,051 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740 2024-11-14T09:32:46,052 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740 2024-11-14T09:32:46,053 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:32:46,053 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:32:46,053 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-14T09:32:46,054 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:32:46,055 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812138, jitterRate=0.03268752992153168}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:32:46,055 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T09:32:46,055 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731576766045Writing region info on filesystem at 1731576766045Initializing all the Stores at 1731576766046 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576766046Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576766046Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576766046Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576766046Cleaning up temporary data from old regions at 1731576766053 (+7 ms)Running coprocessor post-open hooks at 1731576766055 (+2 ms)Region opened successfully at 1731576766055 2024-11-14T09:32:46,056 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731576766021 2024-11-14T09:32:46,059 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T09:32:46,059 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T09:32:46,060 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=83f56b55f2af,37631,1731576765291 2024-11-14T09:32:46,060 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 83f56b55f2af,37631,1731576765291, state=OPEN 2024-11-14T09:32:46,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:32:46,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:32:46,065 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:32:46,065 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:32:46,065 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=83f56b55f2af,37631,1731576765291 2024-11-14T09:32:46,068 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T09:32:46,068 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=83f56b55f2af,37631,1731576765291 in 196 msec 2024-11-14T09:32:46,070 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T09:32:46,070 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 607 msec 2024-11-14T09:32:46,071 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:32:46,071 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T09:32:46,072 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:32:46,072 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=83f56b55f2af,37631,1731576765291, seqNum=-1] 2024-11-14T09:32:46,072 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:32:46,073 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36735, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:32:46,078 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 651 msec 2024-11-14T09:32:46,078 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731576766078, completionTime=-1 2024-11-14T09:32:46,078 INFO 
[master/83f56b55f2af:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T09:32:46,078 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T09:32:46,080 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T09:32:46,080 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731576826080 2024-11-14T09:32:46,080 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731576886080 2024-11-14T09:32:46,080 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-14T09:32:46,080 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,44845,1731576765244-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:46,080 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,44845,1731576765244-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:46,080 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,44845,1731576765244-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:46,080 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-83f56b55f2af:44845, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:46,081 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:46,081 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:46,082 DEBUG [master/83f56b55f2af:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T09:32:46,084 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.763sec 2024-11-14T09:32:46,084 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T09:32:46,084 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T09:32:46,084 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T09:32:46,084 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-14T09:32:46,084 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T09:32:46,084 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,44845,1731576765244-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:32:46,084 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,44845,1731576765244-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T09:32:46,086 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T09:32:46,086 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T09:32:46,086 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,44845,1731576765244-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:32:46,105 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ae2cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:32:46,105 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 83f56b55f2af,44845,-1 for getting cluster id 2024-11-14T09:32:46,105 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T09:32:46,106 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '62b4e73f-5fc9-4a0b-8f00-96c73c495b94' 2024-11-14T09:32:46,106 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T09:32:46,106 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "62b4e73f-5fc9-4a0b-8f00-96c73c495b94" 2024-11-14T09:32:46,106 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d8850d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:32:46,106 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [83f56b55f2af,44845,-1] 2024-11-14T09:32:46,107 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T09:32:46,107 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:32:46,108 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39288, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T09:32:46,108 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66eddd7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:32:46,109 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:32:46,109 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=83f56b55f2af,37631,1731576765291, seqNum=-1] 2024-11-14T09:32:46,110 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:32:46,111 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46058, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:32:46,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=83f56b55f2af,44845,1731576765244 2024-11-14T09:32:46,112 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:32:46,115 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T09:32:46,115 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T09:32:46,115 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 83f56b55f2af,44845,1731576765244 2024-11-14T09:32:46,116 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3d63bf95 2024-11-14T09:32:46,116 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T09:32:46,116 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39298, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T09:32:46,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44845 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T09:32:46,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44845 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-14T09:32:46,117 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44845 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:32:46,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44845 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-14T09:32:46,119 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T09:32:46,119 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:32:46,119 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44845 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-14T09:32:46,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44845 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T09:32:46,120 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T09:32:46,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741835_1011 (size=381) 2024-11-14T09:32:46,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741835_1011 (size=381) 2024-11-14T09:32:46,130 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b51662f39c14a78ddb454bac85017be1, NAME => 'TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de 2024-11-14T09:32:46,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741836_1012 (size=64) 2024-11-14T09:32:46,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741836_1012 (size=64) 2024-11-14T09:32:46,139 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:32:46,139 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing b51662f39c14a78ddb454bac85017be1, disabling compactions & flushes 2024-11-14T09:32:46,139 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. 2024-11-14T09:32:46,139 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. 2024-11-14T09:32:46,139 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. after waiting 0 ms 2024-11-14T09:32:46,139 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. 2024-11-14T09:32:46,139 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. 2024-11-14T09:32:46,139 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for b51662f39c14a78ddb454bac85017be1: Waiting for close lock at 1731576766139Disabling compacts and flushes for region at 1731576766139Disabling writes for close at 1731576766139Writing region close event to WAL at 1731576766139Closed at 1731576766139 2024-11-14T09:32:46,141 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T09:32:46,141 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731576766141"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731576766141"}]},"ts":"1731576766141"} 2024-11-14T09:32:46,143 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-14T09:32:46,144 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T09:32:46,144 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731576766144"}]},"ts":"1731576766144"} 2024-11-14T09:32:46,146 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-14T09:32:46,146 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b51662f39c14a78ddb454bac85017be1, ASSIGN}] 2024-11-14T09:32:46,147 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b51662f39c14a78ddb454bac85017be1, ASSIGN 2024-11-14T09:32:46,148 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b51662f39c14a78ddb454bac85017be1, ASSIGN; state=OFFLINE, location=83f56b55f2af,37631,1731576765291; forceNewPlan=false, retain=false 2024-11-14T09:32:46,299 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b51662f39c14a78ddb454bac85017be1, regionState=OPENING, regionLocation=83f56b55f2af,37631,1731576765291 2024-11-14T09:32:46,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b51662f39c14a78ddb454bac85017be1, ASSIGN because future has completed 2024-11-14T09:32:46,301 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b51662f39c14a78ddb454bac85017be1, server=83f56b55f2af,37631,1731576765291}] 2024-11-14T09:32:46,458 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. 
2024-11-14T09:32:46,458 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b51662f39c14a78ddb454bac85017be1, NAME => 'TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:32:46,458 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling b51662f39c14a78ddb454bac85017be1 2024-11-14T09:32:46,458 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:32:46,458 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b51662f39c14a78ddb454bac85017be1 2024-11-14T09:32:46,458 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b51662f39c14a78ddb454bac85017be1 2024-11-14T09:32:46,459 INFO [StoreOpener-b51662f39c14a78ddb454bac85017be1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b51662f39c14a78ddb454bac85017be1 2024-11-14T09:32:46,460 INFO [StoreOpener-b51662f39c14a78ddb454bac85017be1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b51662f39c14a78ddb454bac85017be1 columnFamilyName info 2024-11-14T09:32:46,461 DEBUG [StoreOpener-b51662f39c14a78ddb454bac85017be1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:32:46,461 INFO [StoreOpener-b51662f39c14a78ddb454bac85017be1-1 {}] regionserver.HStore(327): Store=b51662f39c14a78ddb454bac85017be1/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:32:46,461 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b51662f39c14a78ddb454bac85017be1 2024-11-14T09:32:46,462 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1 2024-11-14T09:32:46,462 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1 2024-11-14T09:32:46,462 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b51662f39c14a78ddb454bac85017be1 2024-11-14T09:32:46,462 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b51662f39c14a78ddb454bac85017be1 2024-11-14T09:32:46,463 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b51662f39c14a78ddb454bac85017be1 2024-11-14T09:32:46,465 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:32:46,466 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b51662f39c14a78ddb454bac85017be1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=714572, jitterRate=-0.09137529134750366}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T09:32:46,466 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b51662f39c14a78ddb454bac85017be1 2024-11-14T09:32:46,466 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b51662f39c14a78ddb454bac85017be1: Running coprocessor pre-open hook at 1731576766458Writing region info on filesystem at 1731576766458Initializing all the Stores at 1731576766459 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576766459Cleaning up temporary data from old regions at 1731576766462 (+3 ms)Running coprocessor post-open hooks at 1731576766466 (+4 ms)Region opened successfully at 1731576766466 2024-11-14T09:32:46,467 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1., pid=6, masterSystemTime=1731576766454 2024-11-14T09:32:46,469 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. 
2024-11-14T09:32:46,469 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. 2024-11-14T09:32:46,470 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b51662f39c14a78ddb454bac85017be1, regionState=OPEN, openSeqNum=2, regionLocation=83f56b55f2af,37631,1731576765291 2024-11-14T09:32:46,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b51662f39c14a78ddb454bac85017be1, server=83f56b55f2af,37631,1731576765291 because future has completed 2024-11-14T09:32:46,475 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T09:32:46,475 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b51662f39c14a78ddb454bac85017be1, server=83f56b55f2af,37631,1731576765291 in 172 msec 2024-11-14T09:32:46,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T09:32:46,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b51662f39c14a78ddb454bac85017be1, ASSIGN in 329 msec 2024-11-14T09:32:46,478 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T09:32:46,478 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731576766478"}]},"ts":"1731576766478"} 2024-11-14T09:32:46,480 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-14T09:32:46,481 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T09:32:46,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 364 msec 2024-11-14T09:32:46,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:46,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:47,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:47,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:48,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:48,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:49,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,054 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,054 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,054 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,055 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,055 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,055 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,059 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,564 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:32:49,565 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,565 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,565 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,565 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,565 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,566 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,566 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,566 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,586 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,586 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,587 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,589 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:49,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:49,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:50,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:50,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:51,531 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T09:32:51,532 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-14T09:32:51,793 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-14T09:32:51,793 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-14T09:32:51,794 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:32:51,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:51,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:52,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:52,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:53,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:53,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:54,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:32:54,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:55,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:55,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-14T09:32:56,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44845 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-14T09:32:56,143 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-14T09:32:56,143 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-14T09:32:56,146 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-14T09:32:56,146 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.
2024-11-14T09:32:56,148 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1., hostname=83f56b55f2af,37631,1731576765291, seqNum=2]
2024-11-14T09:32:56,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on b51662f39c14a78ddb454bac85017be1
2024-11-14T09:32:56,160 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b51662f39c14a78ddb454bac85017be1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-14T09:32:56,176 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/1d6cdfe0eda54ca191a1b32878034cb5 is 1080, key is row0001/info:/1731576776150/Put/seqid=0
2024-11-14T09:32:56,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741837_1013 (size=12509)
2024-11-14T09:32:56,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741837_1013 (size=12509)
2024-11-14T09:32:56,181 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/1d6cdfe0eda54ca191a1b32878034cb5
2024-11-14T09:32:56,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/1d6cdfe0eda54ca191a1b32878034cb5 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/1d6cdfe0eda54ca191a1b32878034cb5
2024-11-14T09:32:56,193 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/1d6cdfe0eda54ca191a1b32878034cb5, entries=7, sequenceid=11, filesize=12.2 K
2024-11-14T09:32:56,193 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for b51662f39c14a78ddb454bac85017be1 in 34ms, sequenceid=11, compaction requested=false
2024-11-14T09:32:56,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b51662f39c14a78ddb454bac85017be1:
2024-11-14T09:32:56,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on b51662f39c14a78ddb454bac85017be1
2024-11-14T09:32:56,194 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b51662f39c14a78ddb454bac85017be1 1/1 column families, dataSize=23.12 KB heapSize=25 KB
2024-11-14T09:32:56,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/83ad0066456b4683a6f5e65f44b49228 is 1080, key is row0008/info:/1731576776160/Put/seqid=0
2024-11-14T09:32:56,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741838_1014 (size=28684)
2024-11-14T09:32:56,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741838_1014 (size=28684)
2024-11-14T09:32:56,203 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=36 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/83ad0066456b4683a6f5e65f44b49228
2024-11-14T09:32:56,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/83ad0066456b4683a6f5e65f44b49228 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/83ad0066456b4683a6f5e65f44b49228
2024-11-14T09:32:56,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/83ad0066456b4683a6f5e65f44b49228, entries=22, sequenceid=36, filesize=28.0 K
2024-11-14T09:32:56,213 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=3.15 KB/3228 for b51662f39c14a78ddb454bac85017be1 in 19ms, sequenceid=36, compaction requested=false
2024-11-14T09:32:56,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b51662f39c14a78ddb454bac85017be1:
2024-11-14T09:32:56,213 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.2 K, sizeToCheck=16.0 K
2024-11-14T09:32:56,213 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-14T09:32:56,213 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split
hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/83ad0066456b4683a6f5e65f44b49228 because midkey is the same as first or last row 2024-11-14T09:32:56,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:56,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:57,296 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:32:57,297 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,297 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,298 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,299 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,316 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,316 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,316 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,316 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,317 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,317 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,320 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:32:57,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:57,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-14T09:32:58,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on b51662f39c14a78ddb454bac85017be1
2024-11-14T09:32:58,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b51662f39c14a78ddb454bac85017be1 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-14T09:32:58,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/1726f7c4af5c473395b52500b65a197b is 1080, key is row0030/info:/1731576776195/Put/seqid=0
2024-11-14T09:32:58,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741839_1015 (size=12509)
2024-11-14T09:32:58,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741839_1015 (size=12509)
2024-11-14T09:32:58,216 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=46 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/1726f7c4af5c473395b52500b65a197b
2024-11-14T09:32:58,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/1726f7c4af5c473395b52500b65a197b as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/1726f7c4af5c473395b52500b65a197b
2024-11-14T09:32:58,227 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/1726f7c4af5c473395b52500b65a197b, entries=7, sequenceid=46, filesize=12.2 K
2024-11-14T09:32:58,228 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for b51662f39c14a78ddb454bac85017be1 in 23ms, sequenceid=46, compaction requested=true
2024-11-14T09:32:58,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b51662f39c14a78ddb454bac85017be1:
2024-11-14T09:32:58,228 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=52.4 K, sizeToCheck=16.0 K
2024-11-14T09:32:58,228 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-14T09:32:58,228 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/83ad0066456b4683a6f5e65f44b49228 because midkey is the same as first or last row
2024-11-14T09:32:58,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b51662f39c14a78ddb454bac85017be1:info, priority=-2147483648, current under compaction store size is 1
2024-11-14T09:32:58,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-14T09:32:58,229 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-14T09:32:58,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on b51662f39c14a78ddb454bac85017be1
2024-11-14T09:32:58,229 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b51662f39c14a78ddb454bac85017be1 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-14T09:32:58,230 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 53702 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-14T09:32:58,230 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1541): b51662f39c14a78ddb454bac85017be1/info is initiating minor compaction (all files)
2024-11-14T09:32:58,230 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b51662f39c14a78ddb454bac85017be1/info in TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.
2024-11-14T09:32:58,230 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/1d6cdfe0eda54ca191a1b32878034cb5, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/83ad0066456b4683a6f5e65f44b49228, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/1726f7c4af5c473395b52500b65a197b] into tmpdir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp, totalSize=52.4 K
2024-11-14T09:32:58,231 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1d6cdfe0eda54ca191a1b32878034cb5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731576776150
2024-11-14T09:32:58,231 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 83ad0066456b4683a6f5e65f44b49228, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=36, earliestPutTs=1731576776160
2024-11-14T09:32:58,232 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1726f7c4af5c473395b52500b65a197b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1731576776195
2024-11-14T09:32:58,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/b66ffc151f684df1a0ca1a6df2f502b8 is 1080, key is row0037/info:/1731576778206/Put/seqid=0
2024-11-14T09:32:58,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741840_1016 (size=20064)
2024-11-14T09:32:58,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741840_1016 (size=20064)
2024-11-14T09:32:58,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=63 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/b66ffc151f684df1a0ca1a6df2f502b8
2024-11-14T09:32:58,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/b66ffc151f684df1a0ca1a6df2f502b8 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/b66ffc151f684df1a0ca1a6df2f502b8
2024-11-14T09:32:58,250 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b51662f39c14a78ddb454bac85017be1#info#compaction#61 average throughput is 18.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-14T09:32:58,250 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/00c20d93f418462bb43e881e197f1d59 is 1080, key is row0001/info:/1731576776150/Put/seqid=0
2024-11-14T09:32:58,253 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/b66ffc151f684df1a0ca1a6df2f502b8, entries=14, sequenceid=63, filesize=19.6 K
2024-11-14T09:32:58,254 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=14.71 KB/15064 for b51662f39c14a78ddb454bac85017be1 in 25ms, sequenceid=63, compaction requested=false
2024-11-14T09:32:58,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b51662f39c14a78ddb454bac85017be1:
2024-11-14T09:32:58,254 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.0 K, sizeToCheck=16.0 K
2024-11-14T09:32:58,254 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-14T09:32:58,254 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/83ad0066456b4683a6f5e65f44b49228 because midkey is the same as first or last row
2024-11-14T09:32:58,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741841_1017 (size=43901)
2024-11-14T09:32:58,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741841_1017 (size=43901)
2024-11-14T09:32:58,261 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/00c20d93f418462bb43e881e197f1d59 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/00c20d93f418462bb43e881e197f1d59
2024-11-14T09:32:58,266 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b51662f39c14a78ddb454bac85017be1/info of b51662f39c14a78ddb454bac85017be1 into 00c20d93f418462bb43e881e197f1d59(size=42.9 K), total size for store is 62.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-14T09:32:58,266 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b51662f39c14a78ddb454bac85017be1:
2024-11-14T09:32:58,266 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1., storeName=b51662f39c14a78ddb454bac85017be1/info, priority=13, startTime=1731576778228; duration=0sec
2024-11-14T09:32:58,266 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=62.5 K, sizeToCheck=16.0 K
2024-11-14T09:32:58,266 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-14T09:32:58,266 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/00c20d93f418462bb43e881e197f1d59 because midkey is the same as first or last row
2024-11-14T09:32:58,266 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=62.5 K, sizeToCheck=16.0 K
2024-11-14T09:32:58,266 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-14T09:32:58,266 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/00c20d93f418462bb43e881e197f1d59 because midkey is the same as first or last row
2024-11-14T09:32:58,266 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=62.5 K, sizeToCheck=16.0 K
2024-11-14T09:32:58,266 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-14T09:32:58,266 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split
hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/00c20d93f418462bb43e881e197f1d59 because midkey is the same as first or last row 2024-11-14T09:32:58,266 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:32:58,266 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b51662f39c14a78ddb454bac85017be1:info 2024-11-14T09:32:58,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:58,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:59,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:32:59,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:33:00,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:00,256 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b51662f39c14a78ddb454bac85017be1 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-14T09:33:00,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/5fa9eb7173e444ddb571f8e444c03232 is 1080, key is row0051/info:/1731576778230/Put/seqid=0 2024-11-14T09:33:00,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741842_1018 (size=21141) 2024-11-14T09:33:00,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741842_1018 (size=21141) 2024-11-14T09:33:00,266 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/5fa9eb7173e444ddb571f8e444c03232 2024-11-14T09:33:00,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/5fa9eb7173e444ddb571f8e444c03232 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/5fa9eb7173e444ddb571f8e444c03232 2024-11-14T09:33:00,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/5fa9eb7173e444ddb571f8e444c03232, entries=15, sequenceid=82, filesize=20.6 K 2024-11-14T09:33:00,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=14.71 KB/15064 for b51662f39c14a78ddb454bac85017be1 in 21ms, sequenceid=82, compaction requested=true 2024-11-14T09:33:00,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b51662f39c14a78ddb454bac85017be1: 2024-11-14T09:33:00,277 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-11-14T09:33:00,277 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:33:00,277 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/00c20d93f418462bb43e881e197f1d59 because midkey is the same as first or last row 2024-11-14T09:33:00,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b51662f39c14a78ddb454bac85017be1:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-14T09:33:00,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:00,277 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:33:00,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:00,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b51662f39c14a78ddb454bac85017be1 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-14T09:33:00,278 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:33:00,278 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1541): b51662f39c14a78ddb454bac85017be1/info is initiating minor compaction (all files) 2024-11-14T09:33:00,278 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b51662f39c14a78ddb454bac85017be1/info in TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. 2024-11-14T09:33:00,279 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/00c20d93f418462bb43e881e197f1d59, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/b66ffc151f684df1a0ca1a6df2f502b8, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/5fa9eb7173e444ddb571f8e444c03232] into tmpdir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp, totalSize=83.1 K 2024-11-14T09:33:00,279 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 00c20d93f418462bb43e881e197f1d59, keycount=36, bloomtype=ROW, size=42.9 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1731576776150 2024-11-14T09:33:00,279 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting b66ffc151f684df1a0ca1a6df2f502b8, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=63, earliestPutTs=1731576778206 2024-11-14T09:33:00,280 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5fa9eb7173e444ddb571f8e444c03232, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731576778230 2024-11-14T09:33:00,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/eb0be21778424e448b3e237a17c943a6 is 1080, key is 
row0066/info:/1731576780257/Put/seqid=0 2024-11-14T09:33:00,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741843_1019 (size=21141) 2024-11-14T09:33:00,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741843_1019 (size=21141) 2024-11-14T09:33:00,288 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/eb0be21778424e448b3e237a17c943a6 2024-11-14T09:33:00,292 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b51662f39c14a78ddb454bac85017be1#info#compaction#64 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:33:00,293 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/845746a6c4ee4d36a6f0a62fd8c2a7e3 is 1080, key is row0001/info:/1731576776150/Put/seqid=0 2024-11-14T09:33:00,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/eb0be21778424e448b3e237a17c943a6 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/eb0be21778424e448b3e237a17c943a6 2024-11-14T09:33:00,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741844_1020 (size=75378) 2024-11-14T09:33:00,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741844_1020 (size=75378) 2024-11-14T09:33:00,299 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/eb0be21778424e448b3e237a17c943a6, entries=15, sequenceid=100, filesize=20.6 K 2024-11-14T09:33:00,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=12.61 KB/12912 for b51662f39c14a78ddb454bac85017be1 in 22ms, sequenceid=100, compaction requested=false 2024-11-14T09:33:00,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b51662f39c14a78ddb454bac85017be1: 2024-11-14T09:33:00,300 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=103.8 K, sizeToCheck=16.0 K 2024-11-14T09:33:00,300 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:33:00,301 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/00c20d93f418462bb43e881e197f1d59 because midkey is the same as first or last row 2024-11-14T09:33:00,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:00,301 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b51662f39c14a78ddb454bac85017be1 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-14T09:33:00,302 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/845746a6c4ee4d36a6f0a62fd8c2a7e3 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/845746a6c4ee4d36a6f0a62fd8c2a7e3 2024-11-14T09:33:00,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/c1d9d1ed904f4bfeb3003340124c3869 is 1080, key is row0081/info:/1731576780279/Put/seqid=0 2024-11-14T09:33:00,307 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b51662f39c14a78ddb454bac85017be1/info of b51662f39c14a78ddb454bac85017be1 into 845746a6c4ee4d36a6f0a62fd8c2a7e3(size=73.6 K), total size for store is 94.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T09:33:00,307 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b51662f39c14a78ddb454bac85017be1: 2024-11-14T09:33:00,307 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1., storeName=b51662f39c14a78ddb454bac85017be1/info, priority=13, startTime=1731576780277; duration=0sec 2024-11-14T09:33:00,307 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.3 K, sizeToCheck=16.0 K 2024-11-14T09:33:00,307 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:33:00,308 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.3 K, sizeToCheck=16.0 K 2024-11-14T09:33:00,308 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:33:00,308 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.3 K, sizeToCheck=16.0 K 2024-11-14T09:33:00,308 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:33:00,309 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:00,309 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:00,309 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b51662f39c14a78ddb454bac85017be1:info 2024-11-14T09:33:00,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741845_1021 (size=18987) 2024-11-14T09:33:00,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741845_1021 (size=18987) 2024-11-14T09:33:00,310 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44845 {}] assignment.AssignmentManager(1355): Split request from 83f56b55f2af,37631,1731576765291, parent={ENCODED => b51662f39c14a78ddb454bac85017be1, NAME => 'TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-14T09:33:00,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/c1d9d1ed904f4bfeb3003340124c3869 2024-11-14T09:33:00,315 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44845 {}] assignment.SplitTableRegionProcedure(223): Splittable=true 
state=OPEN, location=83f56b55f2af,37631,1731576765291 2024-11-14T09:33:00,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/c1d9d1ed904f4bfeb3003340124c3869 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/c1d9d1ed904f4bfeb3003340124c3869 2024-11-14T09:33:00,319 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44845 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b51662f39c14a78ddb454bac85017be1, daughterA=e37492aee67741e333835d8cf461e1c0, daughterB=c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:00,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/c1d9d1ed904f4bfeb3003340124c3869, entries=13, sequenceid=116, filesize=18.5 K 2024-11-14T09:33:00,320 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b51662f39c14a78ddb454bac85017be1, daughterA=e37492aee67741e333835d8cf461e1c0, daughterB=c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:00,320 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b51662f39c14a78ddb454bac85017be1, daughterA=e37492aee67741e333835d8cf461e1c0, daughterB=c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:00,320 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b51662f39c14a78ddb454bac85017be1, daughterA=e37492aee67741e333835d8cf461e1c0, daughterB=c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:00,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=3.15 KB/3228 for b51662f39c14a78ddb454bac85017be1 in 20ms, sequenceid=116, compaction requested=true 2024-11-14T09:33:00,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b51662f39c14a78ddb454bac85017be1: 2024-11-14T09:33:00,321 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-11-14T09:33:00,321 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:33:00,321 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-11-14T09:33:00,321 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:33:00,321 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is 
big enough sumSize=112.8 K, sizeToCheck=16.0 K 2024-11-14T09:33:00,321 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:33:00,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-11-14T09:33:00,322 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44845 {}] assignment.AssignmentManager(1355): Split request from 83f56b55f2af,37631,1731576765291, parent={ENCODED => b51662f39c14a78ddb454bac85017be1, NAME => 'TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-14T09:33:00,322 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44845 {}] assignment.AssignmentManager(1370): Ignoring split request from 83f56b55f2af,37631,1731576765291, parent={ENCODED => b51662f39c14a78ddb454bac85017be1, NAME => 'TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.', STARTKEY => '', ENDKEY => ''} because parent is unknown or not open 2024-11-14T09:33:00,326 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b51662f39c14a78ddb454bac85017be1, UNASSIGN}] 2024-11-14T09:33:00,328 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b51662f39c14a78ddb454bac85017be1, UNASSIGN 2024-11-14T09:33:00,329 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=b51662f39c14a78ddb454bac85017be1, regionState=CLOSING, regionLocation=83f56b55f2af,37631,1731576765291 2024-11-14T09:33:00,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b51662f39c14a78ddb454bac85017be1, UNASSIGN because future has completed 2024-11-14T09:33:00,332 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-14T09:33:00,332 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure b51662f39c14a78ddb454bac85017be1, server=83f56b55f2af,37631,1731576765291}] 2024-11-14T09:33:00,489 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:00,489 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-14T09:33:00,489 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing b51662f39c14a78ddb454bac85017be1, disabling compactions & flushes 2024-11-14T09:33:00,489 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): 
Closing region TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. 2024-11-14T09:33:00,490 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. 2024-11-14T09:33:00,490 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. after waiting 0 ms 2024-11-14T09:33:00,490 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. 2024-11-14T09:33:00,490 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing b51662f39c14a78ddb454bac85017be1 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-14T09:33:00,494 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/fa82039215704c3e936abbc8a5843e22 is 1080, key is row0094/info:/1731576780302/Put/seqid=0 2024-11-14T09:33:00,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741846_1022 (size=8193) 2024-11-14T09:33:00,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741846_1022 (size=8193) 2024-11-14T09:33:00,498 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/fa82039215704c3e936abbc8a5843e22 2024-11-14T09:33:00,504 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/.tmp/info/fa82039215704c3e936abbc8a5843e22 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/fa82039215704c3e936abbc8a5843e22 2024-11-14T09:33:00,509 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/fa82039215704c3e936abbc8a5843e22, entries=3, sequenceid=123, filesize=8.0 K 2024-11-14T09:33:00,510 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 
b51662f39c14a78ddb454bac85017be1 in 19ms, sequenceid=123, compaction requested=true 2024-11-14T09:33:00,510 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/1d6cdfe0eda54ca191a1b32878034cb5, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/83ad0066456b4683a6f5e65f44b49228, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/00c20d93f418462bb43e881e197f1d59, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/1726f7c4af5c473395b52500b65a197b, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/b66ffc151f684df1a0ca1a6df2f502b8, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/5fa9eb7173e444ddb571f8e444c03232] to archive 2024-11-14T09:33:00,511 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T09:33:00,513 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/1d6cdfe0eda54ca191a1b32878034cb5 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/1d6cdfe0eda54ca191a1b32878034cb5 2024-11-14T09:33:00,514 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/83ad0066456b4683a6f5e65f44b49228 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/83ad0066456b4683a6f5e65f44b49228 2024-11-14T09:33:00,515 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/00c20d93f418462bb43e881e197f1d59 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/00c20d93f418462bb43e881e197f1d59 2024-11-14T09:33:00,516 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/1726f7c4af5c473395b52500b65a197b to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/1726f7c4af5c473395b52500b65a197b 2024-11-14T09:33:00,517 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/b66ffc151f684df1a0ca1a6df2f502b8 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/b66ffc151f684df1a0ca1a6df2f502b8 2024-11-14T09:33:00,518 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/5fa9eb7173e444ddb571f8e444c03232 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/5fa9eb7173e444ddb571f8e444c03232 2024-11-14T09:33:00,524 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-14T09:33:00,525 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. 2024-11-14T09:33:00,525 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for b51662f39c14a78ddb454bac85017be1: Waiting for close lock at 1731576780489Running coprocessor pre-close hooks at 1731576780489Disabling compacts and flushes for region at 1731576780489Disabling writes for close at 1731576780490 (+1 ms)Obtaining lock to block concurrent updates at 1731576780490Preparing flush snapshotting stores in b51662f39c14a78ddb454bac85017be1 at 1731576780490Finished memstore snapshotting TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731576780490Flushing stores of TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. 
at 1731576780491 (+1 ms)Flushing b51662f39c14a78ddb454bac85017be1/info: creating writer at 1731576780491Flushing b51662f39c14a78ddb454bac85017be1/info: appending metadata at 1731576780493 (+2 ms)Flushing b51662f39c14a78ddb454bac85017be1/info: closing flushed file at 1731576780493Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ae83df2: reopening flushed file at 1731576780503 (+10 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for b51662f39c14a78ddb454bac85017be1 in 19ms, sequenceid=123, compaction requested=true at 1731576780510 (+7 ms)Writing region close event to WAL at 1731576780521 (+11 ms)Running coprocessor post-close hooks at 1731576780525 (+4 ms)Closed at 1731576780525 2024-11-14T09:33:00,527 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:00,528 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=b51662f39c14a78ddb454bac85017be1, regionState=CLOSED 2024-11-14T09:33:00,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure b51662f39c14a78ddb454bac85017be1, server=83f56b55f2af,37631,1731576765291 because future has completed 2024-11-14T09:33:00,533 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-14T09:33:00,533 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure b51662f39c14a78ddb454bac85017be1, server=83f56b55f2af,37631,1731576765291 in 198 msec 2024-11-14T09:33:00,535 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-14T09:33:00,535 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b51662f39c14a78ddb454bac85017be1, UNASSIGN in 207 msec 2024-11-14T09:33:00,543 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:00,547 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=b51662f39c14a78ddb454bac85017be1, threads=4 2024-11-14T09:33:00,549 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/fa82039215704c3e936abbc8a5843e22 for region: b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:00,549 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/845746a6c4ee4d36a6f0a62fd8c2a7e3 for region: b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:00,549 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/eb0be21778424e448b3e237a17c943a6 for region: b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:00,549 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/c1d9d1ed904f4bfeb3003340124c3869 for region: b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:00,558 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/fa82039215704c3e936abbc8a5843e22, top=true 2024-11-14T09:33:00,559 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/c1d9d1ed904f4bfeb3003340124c3869, top=true 2024-11-14T09:33:00,560 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/eb0be21778424e448b3e237a17c943a6, top=true 2024-11-14T09:33:00,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741847_1023 (size=27) 2024-11-14T09:33:00,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741847_1023 (size=27) 2024-11-14T09:33:00,568 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-c1d9d1ed904f4bfeb3003340124c3869 for child: c3c1078303094fd4e13055e981b40c0f, parent: b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:00,568 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-fa82039215704c3e936abbc8a5843e22 for child: c3c1078303094fd4e13055e981b40c0f, parent: b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:00,568 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/c1d9d1ed904f4bfeb3003340124c3869 for region: b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:00,568 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/fa82039215704c3e936abbc8a5843e22 for region: b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:00,569 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-eb0be21778424e448b3e237a17c943a6 for child: c3c1078303094fd4e13055e981b40c0f, parent: b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:00,569 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/eb0be21778424e448b3e237a17c943a6 for region: b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:00,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741848_1024 (size=27) 2024-11-14T09:33:00,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741848_1024 (size=27) 2024-11-14T09:33:00,577 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/845746a6c4ee4d36a6f0a62fd8c2a7e3 for region: b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:00,578 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region b51662f39c14a78ddb454bac85017be1 Daughter A: [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/e37492aee67741e333835d8cf461e1c0/info/845746a6c4ee4d36a6f0a62fd8c2a7e3.b51662f39c14a78ddb454bac85017be1] storefiles, Daughter B: [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/845746a6c4ee4d36a6f0a62fd8c2a7e3.b51662f39c14a78ddb454bac85017be1, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-c1d9d1ed904f4bfeb3003340124c3869, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-eb0be21778424e448b3e237a17c943a6, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-fa82039215704c3e936abbc8a5843e22] storefiles. 
2024-11-14T09:33:00,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741849_1025 (size=71) 2024-11-14T09:33:00,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741849_1025 (size=71) 2024-11-14T09:33:00,586 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:00,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741850_1026 (size=71) 2024-11-14T09:33:00,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741850_1026 (size=71) 2024-11-14T09:33:00,598 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:00,608 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/e37492aee67741e333835d8cf461e1c0/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-14T09:33:00,610 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-14T09:33:00,612 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731576780612"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731576780612"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731576780612"}]},"ts":"1731576780612"} 2024-11-14T09:33:00,612 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731576780612"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731576780612"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731576780612"}]},"ts":"1731576780612"} 2024-11-14T09:33:00,612 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731576780612"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731576780612"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731576780612"}]},"ts":"1731576780612"} 2024-11-14T09:33:00,630 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e37492aee67741e333835d8cf461e1c0, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=c3c1078303094fd4e13055e981b40c0f, ASSIGN}] 2024-11-14T09:33:00,631 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c3c1078303094fd4e13055e981b40c0f, ASSIGN 2024-11-14T09:33:00,631 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e37492aee67741e333835d8cf461e1c0, ASSIGN 2024-11-14T09:33:00,632 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c3c1078303094fd4e13055e981b40c0f, ASSIGN; state=SPLITTING_NEW, location=83f56b55f2af,37631,1731576765291; forceNewPlan=false, retain=false 2024-11-14T09:33:00,632 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e37492aee67741e333835d8cf461e1c0, ASSIGN; state=SPLITTING_NEW, location=83f56b55f2af,37631,1731576765291; forceNewPlan=false, retain=false 2024-11-14T09:33:00,783 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=e37492aee67741e333835d8cf461e1c0, regionState=OPENING, regionLocation=83f56b55f2af,37631,1731576765291 2024-11-14T09:33:00,783 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=c3c1078303094fd4e13055e981b40c0f, regionState=OPENING, regionLocation=83f56b55f2af,37631,1731576765291 2024-11-14T09:33:00,785 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e37492aee67741e333835d8cf461e1c0, ASSIGN because future has completed 2024-11-14T09:33:00,786 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure e37492aee67741e333835d8cf461e1c0, server=83f56b55f2af,37631,1731576765291}] 2024-11-14T09:33:00,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c3c1078303094fd4e13055e981b40c0f, ASSIGN because future has completed 2024-11-14T09:33:00,787 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure c3c1078303094fd4e13055e981b40c0f, server=83f56b55f2af,37631,1731576765291}] 2024-11-14T09:33:00,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:00,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:00,941 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0. 2024-11-14T09:33:00,941 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => e37492aee67741e333835d8cf461e1c0, NAME => 'TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-14T09:33:00,942 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling e37492aee67741e333835d8cf461e1c0 2024-11-14T09:33:00,942 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:33:00,942 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for e37492aee67741e333835d8cf461e1c0 2024-11-14T09:33:00,942 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for e37492aee67741e333835d8cf461e1c0 2024-11-14T09:33:00,943 INFO [StoreOpener-e37492aee67741e333835d8cf461e1c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e37492aee67741e333835d8cf461e1c0 2024-11-14T09:33:00,944 INFO [StoreOpener-e37492aee67741e333835d8cf461e1c0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e37492aee67741e333835d8cf461e1c0 columnFamilyName info 2024-11-14T09:33:00,944 DEBUG [StoreOpener-e37492aee67741e333835d8cf461e1c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:00,956 DEBUG [StoreOpener-e37492aee67741e333835d8cf461e1c0-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/e37492aee67741e333835d8cf461e1c0/info/845746a6c4ee4d36a6f0a62fd8c2a7e3.b51662f39c14a78ddb454bac85017be1->hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/845746a6c4ee4d36a6f0a62fd8c2a7e3-bottom 2024-11-14T09:33:00,957 INFO [StoreOpener-e37492aee67741e333835d8cf461e1c0-1 {}] regionserver.HStore(327): Store=e37492aee67741e333835d8cf461e1c0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:33:00,957 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for e37492aee67741e333835d8cf461e1c0 2024-11-14T09:33:00,958 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/e37492aee67741e333835d8cf461e1c0 2024-11-14T09:33:00,959 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/e37492aee67741e333835d8cf461e1c0 2024-11-14T09:33:00,959 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for e37492aee67741e333835d8cf461e1c0 2024-11-14T09:33:00,959 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for e37492aee67741e333835d8cf461e1c0 2024-11-14T09:33:00,960 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for e37492aee67741e333835d8cf461e1c0 2024-11-14T09:33:00,961 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened e37492aee67741e333835d8cf461e1c0; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=791213, jitterRate=0.0060804784297943115}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T09:33:00,961 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e37492aee67741e333835d8cf461e1c0 2024-11-14T09:33:00,961 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for e37492aee67741e333835d8cf461e1c0: Running coprocessor pre-open hook at 1731576780942Writing region info on filesystem at 1731576780942Initializing all the Stores at 1731576780943 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING 
=> 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576780943Cleaning up temporary data from old regions at 1731576780959 (+16 ms)Running coprocessor post-open hooks at 1731576780961 (+2 ms)Region opened successfully at 1731576780961 2024-11-14T09:33:00,962 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0., pid=12, masterSystemTime=1731576780938 2024-11-14T09:33:00,962 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store e37492aee67741e333835d8cf461e1c0:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:33:00,962 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:00,963 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-14T09:33:00,963 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0. 2024-11-14T09:33:00,963 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1541): e37492aee67741e333835d8cf461e1c0/info is initiating minor compaction (all files) 2024-11-14T09:33:00,963 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e37492aee67741e333835d8cf461e1c0/info in TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0. 2024-11-14T09:33:00,963 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/e37492aee67741e333835d8cf461e1c0/info/845746a6c4ee4d36a6f0a62fd8c2a7e3.b51662f39c14a78ddb454bac85017be1->hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/845746a6c4ee4d36a6f0a62fd8c2a7e3-bottom] into tmpdir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/e37492aee67741e333835d8cf461e1c0/.tmp, totalSize=73.6 K 2024-11-14T09:33:00,964 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 845746a6c4ee4d36a6f0a62fd8c2a7e3.b51662f39c14a78ddb454bac85017be1, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731576776150 2024-11-14T09:33:00,965 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0. 
2024-11-14T09:33:00,965 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0. 2024-11-14T09:33:00,965 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. 2024-11-14T09:33:00,965 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => c3c1078303094fd4e13055e981b40c0f, NAME => 'TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-14T09:33:00,965 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:00,965 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:33:00,965 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:00,965 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:00,965 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=e37492aee67741e333835d8cf461e1c0, regionState=OPEN, openSeqNum=127, regionLocation=83f56b55f2af,37631,1731576765291 2024-11-14T09:33:00,966 INFO [StoreOpener-c3c1078303094fd4e13055e981b40c0f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:00,967 INFO [StoreOpener-c3c1078303094fd4e13055e981b40c0f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c3c1078303094fd4e13055e981b40c0f columnFamilyName info 2024-11-14T09:33:00,967 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-14T09:33:00,967 DEBUG [StoreOpener-c3c1078303094fd4e13055e981b40c0f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:00,968 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-11-14T09:33:00,968 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-14T09:33:00,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure e37492aee67741e333835d8cf461e1c0, server=83f56b55f2af,37631,1731576765291 because future has completed 2024-11-14T09:33:00,975 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-14T09:33:00,975 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure e37492aee67741e333835d8cf461e1c0, server=83f56b55f2af,37631,1731576765291 in 187 msec 2024-11-14T09:33:00,976 DEBUG [StoreOpener-c3c1078303094fd4e13055e981b40c0f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/845746a6c4ee4d36a6f0a62fd8c2a7e3.b51662f39c14a78ddb454bac85017be1->hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/845746a6c4ee4d36a6f0a62fd8c2a7e3-top 2024-11-14T09:33:00,977 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=e37492aee67741e333835d8cf461e1c0, ASSIGN in 345 msec 2024-11-14T09:33:00,981 DEBUG [StoreOpener-c3c1078303094fd4e13055e981b40c0f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-c1d9d1ed904f4bfeb3003340124c3869 2024-11-14T09:33:00,984 DEBUG [StoreOpener-c3c1078303094fd4e13055e981b40c0f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-eb0be21778424e448b3e237a17c943a6 2024-11-14T09:33:00,985 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e37492aee67741e333835d8cf461e1c0#info#compaction#67 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:33:00,986 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/e37492aee67741e333835d8cf461e1c0/.tmp/info/15e2276891754d6d9fc987ff39e89319 is 1080, key is row0001/info:/1731576776150/Put/seqid=0 2024-11-14T09:33:00,988 DEBUG [StoreOpener-c3c1078303094fd4e13055e981b40c0f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-fa82039215704c3e936abbc8a5843e22 2024-11-14T09:33:00,988 INFO [StoreOpener-c3c1078303094fd4e13055e981b40c0f-1 {}] regionserver.HStore(327): Store=c3c1078303094fd4e13055e981b40c0f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:33:00,989 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:00,989 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:00,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/.tmp/info/4e1489ae29864855bccd33f07b505c5b is 193, key is TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f./info:regioninfo/1731576780783/Put/seqid=0 2024-11-14T09:33:00,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741851_1027 (size=70862) 2024-11-14T09:33:00,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741851_1027 (size=70862) 2024-11-14T09:33:00,991 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:00,991 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:00,991 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:00,993 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:00,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741852_1028 (size=9847) 
2024-11-14T09:33:00,994 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened c3c1078303094fd4e13055e981b40c0f; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722743, jitterRate=-0.08098599314689636}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T09:33:00,995 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:00,995 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for c3c1078303094fd4e13055e981b40c0f: Running coprocessor pre-open hook at 1731576780965Writing region info on filesystem at 1731576780965Initializing all the Stores at 1731576780966 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576780966Cleaning up temporary data from old regions at 1731576780991 (+25 ms)Running coprocessor post-open hooks at 1731576780995 (+4 ms)Region opened successfully at 1731576780995 2024-11-14T09:33:00,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741852_1028 (size=9847) 2024-11-14T09:33:00,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/.tmp/info/4e1489ae29864855bccd33f07b505c5b 2024-11-14T09:33:00,996 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., pid=13, masterSystemTime=1731576780938 2024-11-14T09:33:00,996 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store c3c1078303094fd4e13055e981b40c0f:info, priority=-2147483648, current under compaction store size is 2 2024-11-14T09:33:00,996 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:00,996 DEBUG [RS:0;83f56b55f2af:37631-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-14T09:33:00,996 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/e37492aee67741e333835d8cf461e1c0/.tmp/info/15e2276891754d6d9fc987ff39e89319 as 
hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/e37492aee67741e333835d8cf461e1c0/info/15e2276891754d6d9fc987ff39e89319 2024-11-14T09:33:00,997 INFO [RS:0;83f56b55f2af:37631-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. 2024-11-14T09:33:00,997 DEBUG [RS:0;83f56b55f2af:37631-longCompactions-0 {}] regionserver.HStore(1541): c3c1078303094fd4e13055e981b40c0f/info is initiating minor compaction (all files) 2024-11-14T09:33:00,997 INFO [RS:0;83f56b55f2af:37631-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c3c1078303094fd4e13055e981b40c0f/info in TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. 2024-11-14T09:33:00,998 INFO [RS:0;83f56b55f2af:37631-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/845746a6c4ee4d36a6f0a62fd8c2a7e3.b51662f39c14a78ddb454bac85017be1->hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/845746a6c4ee4d36a6f0a62fd8c2a7e3-top, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-eb0be21778424e448b3e237a17c943a6, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-c1d9d1ed904f4bfeb3003340124c3869, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-fa82039215704c3e936abbc8a5843e22] into tmpdir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp, totalSize=120.8 K 2024-11-14T09:33:00,998 DEBUG [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. 2024-11-14T09:33:00,998 INFO [RS_OPEN_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. 
2024-11-14T09:33:00,998 DEBUG [RS:0;83f56b55f2af:37631-longCompactions-0 {}] compactions.Compactor(225): Compacting 845746a6c4ee4d36a6f0a62fd8c2a7e3.b51662f39c14a78ddb454bac85017be1, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731576776150 2024-11-14T09:33:00,999 DEBUG [RS:0;83f56b55f2af:37631-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-eb0be21778424e448b3e237a17c943a6, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1731576780257 2024-11-14T09:33:00,999 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=c3c1078303094fd4e13055e981b40c0f, regionState=OPEN, openSeqNum=127, regionLocation=83f56b55f2af,37631,1731576765291 2024-11-14T09:33:00,999 DEBUG [RS:0;83f56b55f2af:37631-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-c1d9d1ed904f4bfeb3003340124c3869, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1731576780279 2024-11-14T09:33:00,999 DEBUG [RS:0;83f56b55f2af:37631-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-fa82039215704c3e936abbc8a5843e22, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731576780302 2024-11-14T09:33:01,002 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure c3c1078303094fd4e13055e981b40c0f, server=83f56b55f2af,37631,1731576765291 because future has completed 2024-11-14T09:33:01,002 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in e37492aee67741e333835d8cf461e1c0/info of e37492aee67741e333835d8cf461e1c0 into 15e2276891754d6d9fc987ff39e89319(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T09:33:01,002 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e37492aee67741e333835d8cf461e1c0: 2024-11-14T09:33:01,002 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0., storeName=e37492aee67741e333835d8cf461e1c0/info, priority=15, startTime=1731576780962; duration=0sec 2024-11-14T09:33:01,002 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:01,002 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e37492aee67741e333835d8cf461e1c0:info 2024-11-14T09:33:01,005 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-14T09:33:01,005 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure c3c1078303094fd4e13055e981b40c0f, server=83f56b55f2af,37631,1731576765291 in 216 msec 2024-11-14T09:33:01,007 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-14T09:33:01,007 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c3c1078303094fd4e13055e981b40c0f, ASSIGN in 375 msec 2024-11-14T09:33:01,009 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=b51662f39c14a78ddb454bac85017be1, daughterA=e37492aee67741e333835d8cf461e1c0, daughterB=c3c1078303094fd4e13055e981b40c0f in 691 msec 2024-11-14T09:33:01,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/.tmp/ns/a7c446af93384bfc9da7517977883e20 is 43, key is default/ns:d/1731576766074/Put/seqid=0 2024-11-14T09:33:01,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741853_1029 (size=5153) 2024-11-14T09:33:01,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741853_1029 (size=5153) 2024-11-14T09:33:01,021 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/.tmp/ns/a7c446af93384bfc9da7517977883e20 2024-11-14T09:33:01,024 INFO [RS:0;83f56b55f2af:37631-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c3c1078303094fd4e13055e981b40c0f#info#compaction#70 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:33:01,025 DEBUG [RS:0;83f56b55f2af:37631-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/9b170149b86749aea1b3411ffa386013 is 1080, key is row0062/info:/1731576778249/Put/seqid=0 2024-11-14T09:33:01,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741854_1030 (size=43081) 2024-11-14T09:33:01,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741854_1030 (size=43081) 2024-11-14T09:33:01,039 DEBUG [RS:0;83f56b55f2af:37631-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/9b170149b86749aea1b3411ffa386013 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/9b170149b86749aea1b3411ffa386013 2024-11-14T09:33:01,044 INFO [RS:0;83f56b55f2af:37631-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in c3c1078303094fd4e13055e981b40c0f/info of c3c1078303094fd4e13055e981b40c0f into 9b170149b86749aea1b3411ffa386013(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:33:01,044 DEBUG [RS:0;83f56b55f2af:37631-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:01,044 INFO [RS:0;83f56b55f2af:37631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., storeName=c3c1078303094fd4e13055e981b40c0f/info, priority=12, startTime=1731576780996; duration=0sec 2024-11-14T09:33:01,044 DEBUG [RS:0;83f56b55f2af:37631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:01,044 DEBUG [RS:0;83f56b55f2af:37631-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c3c1078303094fd4e13055e981b40c0f:info 2024-11-14T09:33:01,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/.tmp/table/3034c2acbf15441aad4f4be5e30c7a01 is 65, key is TestLogRolling-testLogRolling/table:state/1731576766478/Put/seqid=0 2024-11-14T09:33:01,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741855_1031 (size=5340) 2024-11-14T09:33:01,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741855_1031 (size=5340) 2024-11-14T09:33:01,049 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), 
to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/.tmp/table/3034c2acbf15441aad4f4be5e30c7a01 2024-11-14T09:33:01,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/.tmp/info/4e1489ae29864855bccd33f07b505c5b as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/info/4e1489ae29864855bccd33f07b505c5b 2024-11-14T09:33:01,057 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/info/4e1489ae29864855bccd33f07b505c5b, entries=30, sequenceid=17, filesize=9.6 K 2024-11-14T09:33:01,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/.tmp/ns/a7c446af93384bfc9da7517977883e20 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/ns/a7c446af93384bfc9da7517977883e20 2024-11-14T09:33:01,061 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/ns/a7c446af93384bfc9da7517977883e20, entries=2, sequenceid=17, filesize=5.0 K 2024-11-14T09:33:01,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/.tmp/table/3034c2acbf15441aad4f4be5e30c7a01 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/table/3034c2acbf15441aad4f4be5e30c7a01 2024-11-14T09:33:01,066 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/table/3034c2acbf15441aad4f4be5e30c7a01, entries=2, sequenceid=17, filesize=5.2 K 2024-11-14T09:33:01,067 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 99ms, sequenceid=17, compaction requested=false 2024-11-14T09:33:01,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-14T09:33:01,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:01,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:02,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:46058 deadline: 1731576792307, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. is not online on 83f56b55f2af,37631,1731576765291 2024-11-14T09:33:02,331 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1., hostname=83f56b55f2af,37631,1731576765291, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1., hostname=83f56b55f2af,37631,1731576765291, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. is not online on 83f56b55f2af,37631,1731576765291 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T09:33:02,332 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1., hostname=83f56b55f2af,37631,1731576765291, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1. 
is not online on 83f56b55f2af,37631,1731576765291 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T09:33:02,332 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731576766117.b51662f39c14a78ddb454bac85017be1., hostname=83f56b55f2af,37631,1731576765291, seqNum=2 from cache 2024-11-14T09:33:02,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:33:02,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:03,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:03,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:33:04,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:04,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:05,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:33:05,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:06,027 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:33:06,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,046 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,050 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,051 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,053 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:33:06,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:06,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:07,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:07,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:08,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:08,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:09,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:09,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:10,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:10,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:11,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:11,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:33:12,369 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., hostname=83f56b55f2af,37631,1731576765291, seqNum=127] 2024-11-14T09:33:12,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:12,379 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3c1078303094fd4e13055e981b40c0f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:33:12,384 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/0f4fbb71d7e947f1ae28c7b235122bae is 1080, key is row0097/info:/1731576792370/Put/seqid=0 2024-11-14T09:33:12,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741856_1032 (size=12516) 2024-11-14T09:33:12,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741856_1032 (size=12516) 2024-11-14T09:33:12,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/0f4fbb71d7e947f1ae28c7b235122bae 2024-11-14T09:33:12,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/0f4fbb71d7e947f1ae28c7b235122bae as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/0f4fbb71d7e947f1ae28c7b235122bae 2024-11-14T09:33:12,401 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/0f4fbb71d7e947f1ae28c7b235122bae, entries=7, sequenceid=137, filesize=12.2 K 2024-11-14T09:33:12,402 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for c3c1078303094fd4e13055e981b40c0f in 23ms, sequenceid=137, compaction requested=false 2024-11-14T09:33:12,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:12,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:12,403 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3c1078303094fd4e13055e981b40c0f 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-14T09:33:12,407 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/af4bf3d3cff44190af8e7c8ed8d9ac0a is 1080, key is row0104/info:/1731576792380/Put/seqid=0 2024-11-14T09:33:12,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741857_1033 (size=21156) 2024-11-14T09:33:12,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741857_1033 (size=21156) 2024-11-14T09:33:12,413 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/af4bf3d3cff44190af8e7c8ed8d9ac0a 2024-11-14T09:33:12,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/af4bf3d3cff44190af8e7c8ed8d9ac0a as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/af4bf3d3cff44190af8e7c8ed8d9ac0a 2024-11-14T09:33:12,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/af4bf3d3cff44190af8e7c8ed8d9ac0a, entries=15, sequenceid=155, filesize=20.7 K 2024-11-14T09:33:12,423 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for c3c1078303094fd4e13055e981b40c0f in 20ms, sequenceid=155, compaction requested=true 2024-11-14T09:33:12,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:12,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c3c1078303094fd4e13055e981b40c0f:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:33:12,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:12,423 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:33:12,424 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 76753 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:33:12,424 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1541): c3c1078303094fd4e13055e981b40c0f/info is initiating minor compaction (all files) 2024-11-14T09:33:12,424 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c3c1078303094fd4e13055e981b40c0f/info in 
TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. 2024-11-14T09:33:12,425 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/9b170149b86749aea1b3411ffa386013, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/0f4fbb71d7e947f1ae28c7b235122bae, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/af4bf3d3cff44190af8e7c8ed8d9ac0a] into tmpdir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp, totalSize=75.0 K 2024-11-14T09:33:12,425 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9b170149b86749aea1b3411ffa386013, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731576778249 2024-11-14T09:33:12,425 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0f4fbb71d7e947f1ae28c7b235122bae, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731576792370 2024-11-14T09:33:12,425 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting af4bf3d3cff44190af8e7c8ed8d9ac0a, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1731576792380 2024-11-14T09:33:12,434 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c3c1078303094fd4e13055e981b40c0f#info#compaction#74 average throughput is 58.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:33:12,435 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/e89d41f770734ae2906f3a272e054e69 is 1080, key is row0062/info:/1731576778249/Put/seqid=0 2024-11-14T09:33:12,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741858_1034 (size=66967) 2024-11-14T09:33:12,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741858_1034 (size=66967) 2024-11-14T09:33:12,444 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/e89d41f770734ae2906f3a272e054e69 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/e89d41f770734ae2906f3a272e054e69 2024-11-14T09:33:12,450 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c3c1078303094fd4e13055e981b40c0f/info of c3c1078303094fd4e13055e981b40c0f into e89d41f770734ae2906f3a272e054e69(size=65.4 K), total size for store is 65.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:33:12,450 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:12,450 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., storeName=c3c1078303094fd4e13055e981b40c0f/info, priority=13, startTime=1731576792423; duration=0sec 2024-11-14T09:33:12,450 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:12,450 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c3c1078303094fd4e13055e981b40c0f:info 2024-11-14T09:33:12,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:12,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:13,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:13,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:14,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:14,421 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3c1078303094fd4e13055e981b40c0f 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-14T09:33:14,425 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/5f321a956c224c53b0d136f2538657b3 is 1080, key is row0119/info:/1731576792404/Put/seqid=0 2024-11-14T09:33:14,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741859_1035 (size=16828) 2024-11-14T09:33:14,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741859_1035 (size=16828) 2024-11-14T09:33:14,431 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/5f321a956c224c53b0d136f2538657b3 2024-11-14T09:33:14,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/5f321a956c224c53b0d136f2538657b3 as 
hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/5f321a956c224c53b0d136f2538657b3 2024-11-14T09:33:14,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/5f321a956c224c53b0d136f2538657b3, entries=11, sequenceid=170, filesize=16.4 K 2024-11-14T09:33:14,443 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=12.61 KB/12912 for c3c1078303094fd4e13055e981b40c0f in 22ms, sequenceid=170, compaction requested=false 2024-11-14T09:33:14,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:14,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:14,445 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3c1078303094fd4e13055e981b40c0f 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-14T09:33:14,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/9be5c95cb370412692c99832b101951f is 1080, key is row0130/info:/1731576794422/Put/seqid=0 2024-11-14T09:33:14,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741860_1036 (size=20078) 2024-11-14T09:33:14,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741860_1036 (size=20078) 2024-11-14T09:33:14,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=187 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/9be5c95cb370412692c99832b101951f 2024-11-14T09:33:14,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/9be5c95cb370412692c99832b101951f as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/9be5c95cb370412692c99832b101951f 2024-11-14T09:33:14,465 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/9be5c95cb370412692c99832b101951f, entries=14, sequenceid=187, filesize=19.6 K 2024-11-14T09:33:14,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for c3c1078303094fd4e13055e981b40c0f in 20ms, sequenceid=187, compaction requested=true 2024-11-14T09:33:14,466 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:14,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c3c1078303094fd4e13055e981b40c0f:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:33:14,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:14,466 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:33:14,467 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103873 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:33:14,467 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1541): c3c1078303094fd4e13055e981b40c0f/info is initiating minor compaction (all files) 2024-11-14T09:33:14,467 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c3c1078303094fd4e13055e981b40c0f/info in TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. 2024-11-14T09:33:14,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:14,467 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/e89d41f770734ae2906f3a272e054e69, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/5f321a956c224c53b0d136f2538657b3, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/9be5c95cb370412692c99832b101951f] into tmpdir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp, totalSize=101.4 K 2024-11-14T09:33:14,467 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3c1078303094fd4e13055e981b40c0f 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-14T09:33:14,467 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting e89d41f770734ae2906f3a272e054e69, keycount=57, bloomtype=ROW, size=65.4 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1731576778249 2024-11-14T09:33:14,468 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5f321a956c224c53b0d136f2538657b3, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731576792404 2024-11-14T09:33:14,468 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9be5c95cb370412692c99832b101951f, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1731576794422 2024-11-14T09:33:14,471 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/147fa0cf0da2493f97920f97dbcdec72 is 1080, key is row0144/info:/1731576794446/Put/seqid=0 2024-11-14T09:33:14,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741861_1037 (size=20078) 2024-11-14T09:33:14,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741861_1037 (size=20078) 2024-11-14T09:33:14,478 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/147fa0cf0da2493f97920f97dbcdec72 2024-11-14T09:33:14,481 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c3c1078303094fd4e13055e981b40c0f#info#compaction#78 average throughput is 42.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:33:14,482 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/c730dde86f094908b06f984f0f7f61ea is 1080, key is row0062/info:/1731576778249/Put/seqid=0 2024-11-14T09:33:14,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/147fa0cf0da2493f97920f97dbcdec72 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/147fa0cf0da2493f97920f97dbcdec72 2024-11-14T09:33:14,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741862_1038 (size=94096) 2024-11-14T09:33:14,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741862_1038 (size=94096) 2024-11-14T09:33:14,491 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/c730dde86f094908b06f984f0f7f61ea as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/c730dde86f094908b06f984f0f7f61ea 2024-11-14T09:33:14,493 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/147fa0cf0da2493f97920f97dbcdec72, entries=14, sequenceid=204, filesize=19.6 K 2024-11-14T09:33:14,494 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=3.15 KB/3228 for c3c1078303094fd4e13055e981b40c0f in 27ms, sequenceid=204, compaction requested=false 2024-11-14T09:33:14,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:14,497 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c3c1078303094fd4e13055e981b40c0f/info of c3c1078303094fd4e13055e981b40c0f into c730dde86f094908b06f984f0f7f61ea(size=91.9 K), total size for store is 111.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:33:14,497 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:14,497 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., storeName=c3c1078303094fd4e13055e981b40c0f/info, priority=13, startTime=1731576794466; duration=0sec 2024-11-14T09:33:14,497 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:14,497 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c3c1078303094fd4e13055e981b40c0f:info 2024-11-14T09:33:14,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:14,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:15,228 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4, see HBASE-27595 for details. 2024-11-14T09:33:15,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:15,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:16,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:16,479 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3c1078303094fd4e13055e981b40c0f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:33:16,484 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/6356fb87e9e345139aab528738a88204 is 1080, key is row0158/info:/1731576794468/Put/seqid=0 2024-11-14T09:33:16,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741863_1039 (size=12516) 2024-11-14T09:33:16,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741863_1039 (size=12516) 2024-11-14T09:33:16,489 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/6356fb87e9e345139aab528738a88204 2024-11-14T09:33:16,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/6356fb87e9e345139aab528738a88204 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/6356fb87e9e345139aab528738a88204 2024-11-14T09:33:16,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/6356fb87e9e345139aab528738a88204, entries=7, sequenceid=215, filesize=12.2 K 2024-11-14T09:33:16,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for c3c1078303094fd4e13055e981b40c0f in 21ms, sequenceid=215, compaction requested=true 2024-11-14T09:33:16,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:16,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c3c1078303094fd4e13055e981b40c0f:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:33:16,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:16,501 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:33:16,501 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 126690 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:33:16,502 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1541): c3c1078303094fd4e13055e981b40c0f/info is initiating minor compaction (all files) 2024-11-14T09:33:16,502 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c3c1078303094fd4e13055e981b40c0f/info in TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. 2024-11-14T09:33:16,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:16,502 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/c730dde86f094908b06f984f0f7f61ea, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/147fa0cf0da2493f97920f97dbcdec72, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/6356fb87e9e345139aab528738a88204] into tmpdir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp, totalSize=123.7 K 2024-11-14T09:33:16,502 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3c1078303094fd4e13055e981b40c0f 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-14T09:33:16,502 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting c730dde86f094908b06f984f0f7f61ea, keycount=82, bloomtype=ROW, size=91.9 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1731576778249 2024-11-14T09:33:16,502 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 147fa0cf0da2493f97920f97dbcdec72, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1731576794446 2024-11-14T09:33:16,503 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6356fb87e9e345139aab528738a88204, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1731576794468 2024-11-14T09:33:16,505 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/a2e585d25d034573bb6f524274907b76 is 1080, key is row0165/info:/1731576796480/Put/seqid=0 2024-11-14T09:33:16,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to 
blk_1073741864_1040 (size=21156) 2024-11-14T09:33:16,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741864_1040 (size=21156) 2024-11-14T09:33:16,511 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/a2e585d25d034573bb6f524274907b76 2024-11-14T09:33:16,517 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c3c1078303094fd4e13055e981b40c0f#info#compaction#81 average throughput is 52.85 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:33:16,518 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/d78c6e9d7b174de7ba7833e909f18684 is 1080, key is row0062/info:/1731576778249/Put/seqid=0 2024-11-14T09:33:16,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/a2e585d25d034573bb6f524274907b76 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/a2e585d25d034573bb6f524274907b76 2024-11-14T09:33:16,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741865_1041 (size=116840) 2024-11-14T09:33:16,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741865_1041 (size=116840) 2024-11-14T09:33:16,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/a2e585d25d034573bb6f524274907b76, entries=15, sequenceid=233, filesize=20.7 K 2024-11-14T09:33:16,525 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=13.66 KB/13988 for c3c1078303094fd4e13055e981b40c0f in 23ms, sequenceid=233, compaction requested=false 2024-11-14T09:33:16,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:16,527 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/d78c6e9d7b174de7ba7833e909f18684 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/d78c6e9d7b174de7ba7833e909f18684 2024-11-14T09:33:16,532 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1337): Completed 
compaction of 3 (all) file(s) in c3c1078303094fd4e13055e981b40c0f/info of c3c1078303094fd4e13055e981b40c0f into d78c6e9d7b174de7ba7833e909f18684(size=114.1 K), total size for store is 134.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:33:16,532 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:16,532 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., storeName=c3c1078303094fd4e13055e981b40c0f/info, priority=13, startTime=1731576796500; duration=0sec 2024-11-14T09:33:16,532 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:16,532 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c3c1078303094fd4e13055e981b40c0f:info 2024-11-14T09:33:16,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:33:16,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:17,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:17,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:33:18,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:18,526 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3c1078303094fd4e13055e981b40c0f 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-14T09:33:18,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/2c141aceed654de88691f4e3e0d0f5a1 is 1080, key is row0180/info:/1731576796502/Put/seqid=0 2024-11-14T09:33:18,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741866_1042 (size=20078) 2024-11-14T09:33:18,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741866_1042 (size=20078) 2024-11-14T09:33:18,542 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/2c141aceed654de88691f4e3e0d0f5a1 2024-11-14T09:33:18,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/2c141aceed654de88691f4e3e0d0f5a1 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/2c141aceed654de88691f4e3e0d0f5a1 2024-11-14T09:33:18,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c3c1078303094fd4e13055e981b40c0f, server=83f56b55f2af,37631,1731576765291 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-14T09:33:18,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:46058 deadline: 1731576808549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c3c1078303094fd4e13055e981b40c0f, server=83f56b55f2af,37631,1731576765291 2024-11-14T09:33:18,551 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., hostname=83f56b55f2af,37631,1731576765291, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., hostname=83f56b55f2af,37631,1731576765291, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c3c1078303094fd4e13055e981b40c0f, server=83f56b55f2af,37631,1731576765291 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T09:33:18,551 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., hostname=83f56b55f2af,37631,1731576765291, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c3c1078303094fd4e13055e981b40c0f, server=83f56b55f2af,37631,1731576765291 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T09:33:18,551 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., hostname=83f56b55f2af,37631,1731576765291, seqNum=127 because the exception is null or not the one we care about 2024-11-14T09:33:18,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/2c141aceed654de88691f4e3e0d0f5a1, entries=14, sequenceid=251, filesize=19.6 K 2024-11-14T09:33:18,553 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=15.76 KB/16140 for c3c1078303094fd4e13055e981b40c0f in 27ms, sequenceid=251, compaction requested=true 2024-11-14T09:33:18,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:18,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c3c1078303094fd4e13055e981b40c0f:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:33:18,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:18,553 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:33:18,554 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 158074 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:33:18,554 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1541): c3c1078303094fd4e13055e981b40c0f/info is initiating minor compaction (all files) 2024-11-14T09:33:18,554 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c3c1078303094fd4e13055e981b40c0f/info in TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. 
2024-11-14T09:33:18,554 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/d78c6e9d7b174de7ba7833e909f18684, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/a2e585d25d034573bb6f524274907b76, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/2c141aceed654de88691f4e3e0d0f5a1] into tmpdir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp, totalSize=154.4 K 2024-11-14T09:33:18,555 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting d78c6e9d7b174de7ba7833e909f18684, keycount=103, bloomtype=ROW, size=114.1 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1731576778249 2024-11-14T09:33:18,555 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting a2e585d25d034573bb6f524274907b76, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1731576796480 2024-11-14T09:33:18,555 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2c141aceed654de88691f4e3e0d0f5a1, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731576796502 2024-11-14T09:33:18,565 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c3c1078303094fd4e13055e981b40c0f#info#compaction#83 average throughput is 67.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:33:18,566 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/441b66c50dcc48d3b9c4724c33c14da0 is 1080, key is row0062/info:/1731576778249/Put/seqid=0 2024-11-14T09:33:18,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741867_1043 (size=148409) 2024-11-14T09:33:18,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741867_1043 (size=148409) 2024-11-14T09:33:18,574 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/441b66c50dcc48d3b9c4724c33c14da0 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/441b66c50dcc48d3b9c4724c33c14da0 2024-11-14T09:33:18,580 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c3c1078303094fd4e13055e981b40c0f/info of c3c1078303094fd4e13055e981b40c0f into 441b66c50dcc48d3b9c4724c33c14da0(size=144.9 K), total size for store is 144.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:33:18,580 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:18,580 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., storeName=c3c1078303094fd4e13055e981b40c0f/info, priority=13, startTime=1731576798553; duration=0sec 2024-11-14T09:33:18,580 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:18,580 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c3c1078303094fd4e13055e981b40c0f:info 2024-11-14T09:33:18,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:18,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:19,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:19,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:20,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:20,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:21,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:21,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:22,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:22,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:23,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:23,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:24,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:24,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:25,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:25,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-14T09:33:26,087 INFO [master/83f56b55f2af:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-14T09:33:26,087 INFO [master/83f56b55f2af:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-14T09:33:26,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:26,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:27,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:27,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-14T09:33:28,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on c3c1078303094fd4e13055e981b40c0f
2024-11-14T09:33:28,594 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3c1078303094fd4e13055e981b40c0f 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-11-14T09:33:28,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/29a45b3d143e482dbe1c0becc4a82d69 is 1080, key is row0194/info:/1731576798527/Put/seqid=0
2024-11-14T09:33:28,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741868_1044 (size=22251)
2024-11-14T09:33:28,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741868_1044 (size=22251)
2024-11-14T09:33:28,605 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/29a45b3d143e482dbe1c0becc4a82d69
2024-11-14T09:33:28,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/29a45b3d143e482dbe1c0becc4a82d69 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/29a45b3d143e482dbe1c0becc4a82d69
2024-11-14T09:33:28,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c3c1078303094fd4e13055e981b40c0f, server=83f56b55f2af,37631,1731576765291 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-14T09:33:28,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:46058 deadline: 1731576818614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c3c1078303094fd4e13055e981b40c0f, server=83f56b55f2af,37631,1731576765291
2024-11-14T09:33:28,615 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., hostname=83f56b55f2af,37631,1731576765291, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., hostname=83f56b55f2af,37631,1731576765291, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c3c1078303094fd4e13055e981b40c0f, server=83f56b55f2af,37631,1731576765291 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-14T09:33:28,615 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., hostname=83f56b55f2af,37631,1731576765291, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c3c1078303094fd4e13055e981b40c0f, server=83f56b55f2af,37631,1731576765291 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-14T09:33:28,615 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., hostname=83f56b55f2af,37631,1731576765291, seqNum=127 because the exception is null or not the one we care about
2024-11-14T09:33:28,616 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/29a45b3d143e482dbe1c0becc4a82d69, entries=16, sequenceid=271, filesize=21.7 K
2024-11-14T09:33:28,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for c3c1078303094fd4e13055e981b40c0f in 23ms, sequenceid=271, compaction requested=false
2024-11-14T09:33:28,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3c1078303094fd4e13055e981b40c0f:
2024-11-14T09:33:28,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:28,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:29,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:29,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:30,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:30,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:31,045 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-14T09:33:31,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:31,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:32,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:32,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:33,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:33,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:34,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:34,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:35,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:35,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:36,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:36,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:37,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:37,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:33:38,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:38,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3c1078303094fd4e13055e981b40c0f 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-14T09:33:38,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/66a298faf89c4acd858e8a55c71b5280 is 1080, key is row0210/info:/1731576808595/Put/seqid=0 2024-11-14T09:33:38,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741869_1045 (size=20092) 2024-11-14T09:33:38,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741869_1045 (size=20092) 2024-11-14T09:33:38,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/66a298faf89c4acd858e8a55c71b5280 2024-11-14T09:33:38,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/66a298faf89c4acd858e8a55c71b5280 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/66a298faf89c4acd858e8a55c71b5280 2024-11-14T09:33:38,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/66a298faf89c4acd858e8a55c71b5280, entries=14, sequenceid=288, filesize=19.6 K 2024-11-14T09:33:38,665 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=1.05 KB/1076 for c3c1078303094fd4e13055e981b40c0f in 21ms, sequenceid=288, compaction requested=true 2024-11-14T09:33:38,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:38,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c3c1078303094fd4e13055e981b40c0f:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:33:38,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:38,665 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:33:38,666 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 190752 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-11-14T09:33:38,666 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1541): c3c1078303094fd4e13055e981b40c0f/info is initiating minor compaction (all files) 2024-11-14T09:33:38,666 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c3c1078303094fd4e13055e981b40c0f/info in TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. 2024-11-14T09:33:38,666 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/441b66c50dcc48d3b9c4724c33c14da0, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/29a45b3d143e482dbe1c0becc4a82d69, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/66a298faf89c4acd858e8a55c71b5280] into tmpdir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp, totalSize=186.3 K 2024-11-14T09:33:38,666 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 441b66c50dcc48d3b9c4724c33c14da0, keycount=132, bloomtype=ROW, size=144.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1731576778249 2024-11-14T09:33:38,667 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 29a45b3d143e482dbe1c0becc4a82d69, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1731576798527 2024-11-14T09:33:38,667 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 66a298faf89c4acd858e8a55c71b5280, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731576808595 2024-11-14T09:33:38,677 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c3c1078303094fd4e13055e981b40c0f#info#compaction#86 average throughput is 83.12 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:33:38,678 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/c4a85e8c6f704a2d87285d80bc7293f4 is 1080, key is row0062/info:/1731576778249/Put/seqid=0 2024-11-14T09:33:38,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741870_1046 (size=180886) 2024-11-14T09:33:38,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741870_1046 (size=180886) 2024-11-14T09:33:38,686 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/c4a85e8c6f704a2d87285d80bc7293f4 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/c4a85e8c6f704a2d87285d80bc7293f4 2024-11-14T09:33:38,691 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c3c1078303094fd4e13055e981b40c0f/info of c3c1078303094fd4e13055e981b40c0f into c4a85e8c6f704a2d87285d80bc7293f4(size=176.6 K), total size for store is 176.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:33:38,691 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:38,691 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., storeName=c3c1078303094fd4e13055e981b40c0f/info, priority=13, startTime=1731576818665; duration=0sec 2024-11-14T09:33:38,691 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:38,691 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c3c1078303094fd4e13055e981b40c0f:info 2024-11-14T09:33:38,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:38,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:39,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:39,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:40,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:40,655 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3c1078303094fd4e13055e981b40c0f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:33:40,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/ca3bd8172740457287df3846883fa0e3 is 1080, key is row0224/info:/1731576818645/Put/seqid=0 2024-11-14T09:33:40,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741871_1047 (size=12523) 2024-11-14T09:33:40,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741871_1047 (size=12523) 2024-11-14T09:33:40,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/ca3bd8172740457287df3846883fa0e3 2024-11-14T09:33:40,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/ca3bd8172740457287df3846883fa0e3 as 
hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/ca3bd8172740457287df3846883fa0e3 2024-11-14T09:33:40,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/ca3bd8172740457287df3846883fa0e3, entries=7, sequenceid=299, filesize=12.2 K 2024-11-14T09:33:40,679 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for c3c1078303094fd4e13055e981b40c0f in 24ms, sequenceid=299, compaction requested=false 2024-11-14T09:33:40,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:40,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37631 {}] regionserver.HRegion(8855): Flush requested on c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:40,679 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3c1078303094fd4e13055e981b40c0f 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-14T09:33:40,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/85cb459aca3e4803a3ee868a2ab7aad5 is 1080, key is row0231/info:/1731576820656/Put/seqid=0 2024-11-14T09:33:40,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741872_1048 (size=23333) 2024-11-14T09:33:40,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741872_1048 (size=23333) 2024-11-14T09:33:40,688 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/85cb459aca3e4803a3ee868a2ab7aad5 2024-11-14T09:33:40,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/85cb459aca3e4803a3ee868a2ab7aad5 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/85cb459aca3e4803a3ee868a2ab7aad5 2024-11-14T09:33:40,697 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/85cb459aca3e4803a3ee868a2ab7aad5, entries=17, sequenceid=319, filesize=22.8 K 2024-11-14T09:33:40,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=9.46 KB/9684 for c3c1078303094fd4e13055e981b40c0f in 19ms, sequenceid=319, compaction requested=true 2024-11-14T09:33:40,698 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:40,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c3c1078303094fd4e13055e981b40c0f:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:33:40,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:40,698 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:33:40,699 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 216742 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:33:40,699 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1541): c3c1078303094fd4e13055e981b40c0f/info is initiating minor compaction (all files) 2024-11-14T09:33:40,699 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c3c1078303094fd4e13055e981b40c0f/info in TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. 2024-11-14T09:33:40,700 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/c4a85e8c6f704a2d87285d80bc7293f4, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/ca3bd8172740457287df3846883fa0e3, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/85cb459aca3e4803a3ee868a2ab7aad5] into tmpdir=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp, totalSize=211.7 K 2024-11-14T09:33:40,700 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting c4a85e8c6f704a2d87285d80bc7293f4, keycount=162, bloomtype=ROW, size=176.6 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731576778249 2024-11-14T09:33:40,700 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting ca3bd8172740457287df3846883fa0e3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1731576818645 2024-11-14T09:33:40,700 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] compactions.Compactor(225): Compacting 85cb459aca3e4803a3ee868a2ab7aad5, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1731576820656 2024-11-14T09:33:40,711 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c3c1078303094fd4e13055e981b40c0f#info#compaction#89 average throughput is 95.43 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:33:40,712 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/73b89ab9516844b586c21e47be71e0e3 is 1080, key is row0062/info:/1731576778249/Put/seqid=0 2024-11-14T09:33:40,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741873_1049 (size=206961) 2024-11-14T09:33:40,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741873_1049 (size=206961) 2024-11-14T09:33:40,720 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/73b89ab9516844b586c21e47be71e0e3 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/73b89ab9516844b586c21e47be71e0e3 2024-11-14T09:33:40,726 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c3c1078303094fd4e13055e981b40c0f/info of c3c1078303094fd4e13055e981b40c0f into 73b89ab9516844b586c21e47be71e0e3(size=202.1 K), total size for store is 202.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:33:40,726 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:40,726 INFO [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f., storeName=c3c1078303094fd4e13055e981b40c0f/info, priority=13, startTime=1731576820698; duration=0sec 2024-11-14T09:33:40,726 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:33:40,727 DEBUG [RS:0;83f56b55f2af:37631-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c3c1078303094fd4e13055e981b40c0f:info 2024-11-14T09:33:40,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:40,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:41,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:41,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:42,693 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-14T09:33:42,694 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C37631%2C1731576765291.1731576822694 2024-11-14T09:33:42,700 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,700 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,700 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,700 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,700 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,700 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/WALs/83f56b55f2af,37631,1731576765291/83f56b55f2af%2C37631%2C1731576765291.1731576765666 with entries=310, filesize=307.89 KB; new WAL /user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/WALs/83f56b55f2af,37631,1731576765291/83f56b55f2af%2C37631%2C1731576765291.1731576822694 2024-11-14T09:33:42,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741833_1009 (size=315283) 2024-11-14T09:33:42,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741833_1009 (size=315283) 2024-11-14T09:33:42,708 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34449:34449),(127.0.0.1/127.0.0.1:35097:35097)] 2024-11-14T09:33:42,711 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for e37492aee67741e333835d8cf461e1c0: 2024-11-14T09:33:42,711 INFO [Time-limited 
test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-14T09:33:42,715 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/.tmp/info/3d587b07fcd04cb399d5a2b038a3eb8e is 193, key is TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f./info:regioninfo/1731576780999/Put/seqid=0 2024-11-14T09:33:42,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741875_1051 (size=6223) 2024-11-14T09:33:42,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741875_1051 (size=6223) 2024-11-14T09:33:42,722 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/.tmp/info/3d587b07fcd04cb399d5a2b038a3eb8e 2024-11-14T09:33:42,727 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/.tmp/info/3d587b07fcd04cb399d5a2b038a3eb8e as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/info/3d587b07fcd04cb399d5a2b038a3eb8e 2024-11-14T09:33:42,731 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/info/3d587b07fcd04cb399d5a2b038a3eb8e, entries=5, sequenceid=21, filesize=6.1 K 2024-11-14T09:33:42,732 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 21ms, sequenceid=21, compaction requested=false 2024-11-14T09:33:42,732 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-14T09:33:42,732 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing c3c1078303094fd4e13055e981b40c0f 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-14T09:33:42,736 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/68f7a246b199412e86acb2412890d514 is 1080, key is row0248/info:/1731576820680/Put/seqid=0 2024-11-14T09:33:42,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741876_1052 (size=14681) 2024-11-14T09:33:42,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741876_1052 (size=14681) 2024-11-14T09:33:42,740 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/68f7a246b199412e86acb2412890d514 2024-11-14T09:33:42,745 DEBUG [Time-limited test {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/.tmp/info/68f7a246b199412e86acb2412890d514 as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/68f7a246b199412e86acb2412890d514 2024-11-14T09:33:42,749 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/68f7a246b199412e86acb2412890d514, entries=9, sequenceid=332, filesize=14.3 K 2024-11-14T09:33:42,749 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for c3c1078303094fd4e13055e981b40c0f in 17ms, sequenceid=332, compaction requested=false 2024-11-14T09:33:42,750 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for c3c1078303094fd4e13055e981b40c0f: 2024-11-14T09:33:42,750 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C37631%2C1731576765291.1731576822750 2024-11-14T09:33:42,754 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,754 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,754 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,754 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,754 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,754 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/WALs/83f56b55f2af,37631,1731576765291/83f56b55f2af%2C37631%2C1731576765291.1731576822694 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/WALs/83f56b55f2af,37631,1731576765291/83f56b55f2af%2C37631%2C1731576765291.1731576822750 2024-11-14T09:33:42,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741874_1050 (size=731) 2024-11-14T09:33:42,756 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35097:35097),(127.0.0.1/127.0.0.1:34449:34449)] 2024-11-14T09:33:42,756 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/WALs/83f56b55f2af,37631,1731576765291/83f56b55f2af%2C37631%2C1731576765291.1731576822694 is not closed yet, will try archiving it next time 2024-11-14T09:33:42,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741874_1050 (size=731) 2024-11-14T09:33:42,756 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/WALs/83f56b55f2af,37631,1731576765291/83f56b55f2af%2C37631%2C1731576765291.1731576765666 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/oldWALs/83f56b55f2af%2C37631%2C1731576765291.1731576765666 2024-11-14T09:33:42,757 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 
2024-11-14T09:33:42,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T09:33:42,757 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/WALs/83f56b55f2af,37631,1731576765291/83f56b55f2af%2C37631%2C1731576765291.1731576822694 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/oldWALs/83f56b55f2af%2C37631%2C1731576765291.1731576822694 2024-11-14T09:33:42,757 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T09:33:42,757 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:33:42,757 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:33:42,758 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:33:42,758 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T09:33:42,758 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T09:33:42,758 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1474080094, stopped=false 2024-11-14T09:33:42,758 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=83f56b55f2af,44845,1731576765244 2024-11-14T09:33:42,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:33:42,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:42,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:33:42,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:42,760 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:33:42,761 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T09:33:42,761 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:33:42,761 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:33:42,761 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:33:42,761 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:33:42,761 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '83f56b55f2af,37631,1731576765291' ***** 2024-11-14T09:33:42,761 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T09:33:42,762 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T09:33:42,762 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T09:33:42,762 INFO [RS:0;83f56b55f2af:37631 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T09:33:42,762 INFO [RS:0;83f56b55f2af:37631 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T09:33:42,762 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(3091): Received CLOSE for e37492aee67741e333835d8cf461e1c0 2024-11-14T09:33:42,762 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(3091): Received CLOSE for c3c1078303094fd4e13055e981b40c0f 2024-11-14T09:33:42,762 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(959): stopping server 83f56b55f2af,37631,1731576765291 2024-11-14T09:33:42,762 INFO [RS:0;83f56b55f2af:37631 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:33:42,762 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e37492aee67741e333835d8cf461e1c0, disabling compactions & flushes 2024-11-14T09:33:42,762 INFO [RS:0;83f56b55f2af:37631 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;83f56b55f2af:37631. 2024-11-14T09:33:42,762 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0. 2024-11-14T09:33:42,762 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0. 2024-11-14T09:33:42,762 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0. 
after waiting 0 ms 2024-11-14T09:33:42,762 DEBUG [RS:0;83f56b55f2af:37631 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:33:42,762 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0. 2024-11-14T09:33:42,762 DEBUG [RS:0;83f56b55f2af:37631 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:33:42,762 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T09:33:42,762 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T09:33:42,762 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-14T09:33:42,762 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T09:33:42,763 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-14T09:33:42,763 DEBUG [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(1325): Online Regions={e37492aee67741e333835d8cf461e1c0=TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0., 1588230740=hbase:meta,,1.1588230740, c3c1078303094fd4e13055e981b40c0f=TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.} 2024-11-14T09:33:42,762 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/e37492aee67741e333835d8cf461e1c0/info/845746a6c4ee4d36a6f0a62fd8c2a7e3.b51662f39c14a78ddb454bac85017be1->hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/845746a6c4ee4d36a6f0a62fd8c2a7e3-bottom] to archive 2024-11-14T09:33:42,763 DEBUG [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c3c1078303094fd4e13055e981b40c0f, e37492aee67741e333835d8cf461e1c0 2024-11-14T09:33:42,763 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:33:42,763 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:33:42,763 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:33:42,763 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:33:42,763 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:33:42,763 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T09:33:42,765 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/e37492aee67741e333835d8cf461e1c0/info/845746a6c4ee4d36a6f0a62fd8c2a7e3.b51662f39c14a78ddb454bac85017be1 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/e37492aee67741e333835d8cf461e1c0/info/845746a6c4ee4d36a6f0a62fd8c2a7e3.b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:42,765 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=83f56b55f2af:44845 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-14T09:33:42,765 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-14T09:33:42,767 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-14T09:33:42,768 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:33:42,768 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:33:42,768 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731576822763Running coprocessor pre-close hooks at 1731576822763Disabling compacts and flushes for region at 1731576822763Disabling writes for close at 1731576822763Writing region close event to WAL at 1731576822764 (+1 ms)Running coprocessor post-close hooks at 1731576822768 (+4 ms)Closed at 1731576822768 2024-11-14T09:33:42,768 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T09:33:42,768 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/e37492aee67741e333835d8cf461e1c0/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-14T09:33:42,769 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0. 2024-11-14T09:33:42,769 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e37492aee67741e333835d8cf461e1c0: Waiting for close lock at 1731576822762Running coprocessor pre-close hooks at 1731576822762Disabling compacts and flushes for region at 1731576822762Disabling writes for close at 1731576822762Writing region close event to WAL at 1731576822766 (+4 ms)Running coprocessor post-close hooks at 1731576822769 (+3 ms)Closed at 1731576822769 2024-11-14T09:33:42,769 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731576780315.e37492aee67741e333835d8cf461e1c0. 
2024-11-14T09:33:42,769 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c3c1078303094fd4e13055e981b40c0f, disabling compactions & flushes 2024-11-14T09:33:42,769 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. 2024-11-14T09:33:42,769 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. 2024-11-14T09:33:42,769 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. after waiting 0 ms 2024-11-14T09:33:42,769 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. 2024-11-14T09:33:42,769 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/845746a6c4ee4d36a6f0a62fd8c2a7e3.b51662f39c14a78ddb454bac85017be1->hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/b51662f39c14a78ddb454bac85017be1/info/845746a6c4ee4d36a6f0a62fd8c2a7e3-top, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-eb0be21778424e448b3e237a17c943a6, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-c1d9d1ed904f4bfeb3003340124c3869, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/9b170149b86749aea1b3411ffa386013, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-fa82039215704c3e936abbc8a5843e22, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/0f4fbb71d7e947f1ae28c7b235122bae, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/e89d41f770734ae2906f3a272e054e69, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/af4bf3d3cff44190af8e7c8ed8d9ac0a, 
hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/5f321a956c224c53b0d136f2538657b3, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/c730dde86f094908b06f984f0f7f61ea, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/9be5c95cb370412692c99832b101951f, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/147fa0cf0da2493f97920f97dbcdec72, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/d78c6e9d7b174de7ba7833e909f18684, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/6356fb87e9e345139aab528738a88204, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/a2e585d25d034573bb6f524274907b76, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/441b66c50dcc48d3b9c4724c33c14da0, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/2c141aceed654de88691f4e3e0d0f5a1, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/29a45b3d143e482dbe1c0becc4a82d69, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/c4a85e8c6f704a2d87285d80bc7293f4, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/66a298faf89c4acd858e8a55c71b5280, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/ca3bd8172740457287df3846883fa0e3, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/85cb459aca3e4803a3ee868a2ab7aad5] to archive 2024-11-14T09:33:42,770 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-14T09:33:42,772 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/845746a6c4ee4d36a6f0a62fd8c2a7e3.b51662f39c14a78ddb454bac85017be1 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/845746a6c4ee4d36a6f0a62fd8c2a7e3.b51662f39c14a78ddb454bac85017be1 2024-11-14T09:33:42,773 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-eb0be21778424e448b3e237a17c943a6 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-eb0be21778424e448b3e237a17c943a6 2024-11-14T09:33:42,774 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-c1d9d1ed904f4bfeb3003340124c3869 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-c1d9d1ed904f4bfeb3003340124c3869 2024-11-14T09:33:42,775 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/9b170149b86749aea1b3411ffa386013 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/9b170149b86749aea1b3411ffa386013 2024-11-14T09:33:42,776 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-fa82039215704c3e936abbc8a5843e22 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/TestLogRolling-testLogRolling=b51662f39c14a78ddb454bac85017be1-fa82039215704c3e936abbc8a5843e22 2024-11-14T09:33:42,777 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/0f4fbb71d7e947f1ae28c7b235122bae to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/0f4fbb71d7e947f1ae28c7b235122bae 2024-11-14T09:33:42,778 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/e89d41f770734ae2906f3a272e054e69 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/e89d41f770734ae2906f3a272e054e69 2024-11-14T09:33:42,779 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/af4bf3d3cff44190af8e7c8ed8d9ac0a to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/af4bf3d3cff44190af8e7c8ed8d9ac0a 2024-11-14T09:33:42,780 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/5f321a956c224c53b0d136f2538657b3 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/5f321a956c224c53b0d136f2538657b3 2024-11-14T09:33:42,781 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/c730dde86f094908b06f984f0f7f61ea to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/c730dde86f094908b06f984f0f7f61ea 2024-11-14T09:33:42,782 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/9be5c95cb370412692c99832b101951f to 
hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/9be5c95cb370412692c99832b101951f 2024-11-14T09:33:42,783 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/147fa0cf0da2493f97920f97dbcdec72 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/147fa0cf0da2493f97920f97dbcdec72 2024-11-14T09:33:42,784 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/d78c6e9d7b174de7ba7833e909f18684 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/d78c6e9d7b174de7ba7833e909f18684 2024-11-14T09:33:42,785 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/6356fb87e9e345139aab528738a88204 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/6356fb87e9e345139aab528738a88204 2024-11-14T09:33:42,785 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/a2e585d25d034573bb6f524274907b76 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/a2e585d25d034573bb6f524274907b76 2024-11-14T09:33:42,786 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/441b66c50dcc48d3b9c4724c33c14da0 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/441b66c50dcc48d3b9c4724c33c14da0 2024-11-14T09:33:42,787 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/2c141aceed654de88691f4e3e0d0f5a1 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/2c141aceed654de88691f4e3e0d0f5a1 2024-11-14T09:33:42,788 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/29a45b3d143e482dbe1c0becc4a82d69 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/29a45b3d143e482dbe1c0becc4a82d69 2024-11-14T09:33:42,789 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/c4a85e8c6f704a2d87285d80bc7293f4 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/c4a85e8c6f704a2d87285d80bc7293f4 2024-11-14T09:33:42,790 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/66a298faf89c4acd858e8a55c71b5280 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/66a298faf89c4acd858e8a55c71b5280 2024-11-14T09:33:42,791 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/ca3bd8172740457287df3846883fa0e3 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/ca3bd8172740457287df3846883fa0e3 2024-11-14T09:33:42,792 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/85cb459aca3e4803a3ee868a2ab7aad5 to hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/archive/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/info/85cb459aca3e4803a3ee868a2ab7aad5 2024-11-14T09:33:42,792 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f.-1 {}] 
regionserver.HStore(2414): Failed to report archival of files: [9b170149b86749aea1b3411ffa386013=43081, 0f4fbb71d7e947f1ae28c7b235122bae=12516, e89d41f770734ae2906f3a272e054e69=66967, af4bf3d3cff44190af8e7c8ed8d9ac0a=21156, 5f321a956c224c53b0d136f2538657b3=16828, c730dde86f094908b06f984f0f7f61ea=94096, 9be5c95cb370412692c99832b101951f=20078, 147fa0cf0da2493f97920f97dbcdec72=20078, d78c6e9d7b174de7ba7833e909f18684=116840, 6356fb87e9e345139aab528738a88204=12516, a2e585d25d034573bb6f524274907b76=21156, 441b66c50dcc48d3b9c4724c33c14da0=148409, 2c141aceed654de88691f4e3e0d0f5a1=20078, 29a45b3d143e482dbe1c0becc4a82d69=22251, c4a85e8c6f704a2d87285d80bc7293f4=180886, 66a298faf89c4acd858e8a55c71b5280=20092, ca3bd8172740457287df3846883fa0e3=12523, 85cb459aca3e4803a3ee868a2ab7aad5=23333] 2024-11-14T09:33:42,795 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/data/default/TestLogRolling-testLogRolling/c3c1078303094fd4e13055e981b40c0f/recovered.edits/335.seqid, newMaxSeqId=335, maxSeqId=126 2024-11-14T09:33:42,796 INFO [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. 2024-11-14T09:33:42,796 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c3c1078303094fd4e13055e981b40c0f: Waiting for close lock at 1731576822769Running coprocessor pre-close hooks at 1731576822769Disabling compacts and flushes for region at 1731576822769Disabling writes for close at 1731576822769Writing region close event to WAL at 1731576822792 (+23 ms)Running coprocessor post-close hooks at 1731576822795 (+3 ms)Closed at 1731576822795 2024-11-14T09:33:42,796 DEBUG [RS_CLOSE_REGION-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731576780315.c3c1078303094fd4e13055e981b40c0f. 2024-11-14T09:33:42,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:42,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:42,963 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(976): stopping server 83f56b55f2af,37631,1731576765291; all regions closed. 
2024-11-14T09:33:42,963 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,963 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,964 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,964 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,964 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741834_1010 (size=8107) 2024-11-14T09:33:42,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741834_1010 (size=8107) 2024-11-14T09:33:42,968 DEBUG [RS:0;83f56b55f2af:37631 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/oldWALs 2024-11-14T09:33:42,968 INFO [RS:0;83f56b55f2af:37631 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 83f56b55f2af%2C37631%2C1731576765291.meta:.meta(num 1731576766028) 2024-11-14T09:33:42,968 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,969 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,969 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,969 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,969 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:42,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741877_1053 (size=780) 2024-11-14T09:33:42,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741877_1053 (size=780) 2024-11-14T09:33:42,972 DEBUG [RS:0;83f56b55f2af:37631 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/oldWALs 2024-11-14T09:33:42,972 INFO [RS:0;83f56b55f2af:37631 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 83f56b55f2af%2C37631%2C1731576765291:(num 1731576822750) 2024-11-14T09:33:42,972 DEBUG [RS:0;83f56b55f2af:37631 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:33:42,972 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:33:42,972 INFO [RS:0;83f56b55f2af:37631 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:33:42,973 INFO [RS:0;83f56b55f2af:37631 {}] hbase.ChoreService(370): Chore service for: regionserver/83f56b55f2af:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T09:33:42,973 INFO [RS:0;83f56b55f2af:37631 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:33:42,973 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T09:33:42,973 INFO [RS:0;83f56b55f2af:37631 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37631 2024-11-14T09:33:42,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/83f56b55f2af,37631,1731576765291 2024-11-14T09:33:42,976 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:33:42,976 INFO [RS:0;83f56b55f2af:37631 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:33:42,978 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [83f56b55f2af,37631,1731576765291] 2024-11-14T09:33:42,979 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/83f56b55f2af,37631,1731576765291 already deleted, retry=false 2024-11-14T09:33:42,979 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 83f56b55f2af,37631,1731576765291 expired; onlineServers=0 2024-11-14T09:33:42,979 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '83f56b55f2af,44845,1731576765244' ***** 2024-11-14T09:33:42,979 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T09:33:42,979 INFO [M:0;83f56b55f2af:44845 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:33:42,979 INFO [M:0;83f56b55f2af:44845 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:33:42,979 DEBUG [M:0;83f56b55f2af:44845 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T09:33:42,979 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T09:33:42,979 DEBUG [M:0;83f56b55f2af:44845 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T09:33:42,979 DEBUG [master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576765431 {}] cleaner.HFileCleaner(306): Exit Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576765431,5,FailOnTimeoutGroup] 2024-11-14T09:33:42,979 DEBUG [master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576765431 {}] cleaner.HFileCleaner(306): Exit Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576765431,5,FailOnTimeoutGroup] 2024-11-14T09:33:42,980 INFO [M:0;83f56b55f2af:44845 {}] hbase.ChoreService(370): Chore service for: master/83f56b55f2af:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T09:33:42,980 INFO [M:0;83f56b55f2af:44845 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:33:42,980 DEBUG [M:0;83f56b55f2af:44845 {}] master.HMaster(1795): Stopping service threads 2024-11-14T09:33:42,980 INFO [M:0;83f56b55f2af:44845 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T09:33:42,980 INFO [M:0;83f56b55f2af:44845 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:33:42,980 INFO [M:0;83f56b55f2af:44845 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T09:33:42,980 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T09:33:42,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T09:33:42,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:42,981 DEBUG [M:0;83f56b55f2af:44845 {}] zookeeper.ZKUtil(347): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T09:33:42,981 WARN [M:0;83f56b55f2af:44845 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T09:33:42,982 INFO [M:0;83f56b55f2af:44845 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/.lastflushedseqids 2024-11-14T09:33:42,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741878_1054 (size=228) 2024-11-14T09:33:42,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741878_1054 (size=228) 2024-11-14T09:33:42,987 INFO [M:0;83f56b55f2af:44845 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T09:33:42,987 INFO [M:0;83f56b55f2af:44845 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T09:33:42,987 DEBUG [M:0;83f56b55f2af:44845 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:33:42,987 INFO [M:0;83f56b55f2af:44845 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:33:42,987 DEBUG [M:0;83f56b55f2af:44845 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:33:42,987 DEBUG [M:0;83f56b55f2af:44845 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:33:42,987 DEBUG [M:0;83f56b55f2af:44845 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:33:42,987 INFO [M:0;83f56b55f2af:44845 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.36 KB 2024-11-14T09:33:43,003 DEBUG [M:0;83f56b55f2af:44845 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/30a46d186faa487d8e654f084131b7aa is 82, key is hbase:meta,,1/info:regioninfo/1731576766059/Put/seqid=0 2024-11-14T09:33:43,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741879_1055 (size=5672) 2024-11-14T09:33:43,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741879_1055 (size=5672) 2024-11-14T09:33:43,007 INFO [M:0;83f56b55f2af:44845 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/30a46d186faa487d8e654f084131b7aa 2024-11-14T09:33:43,027 DEBUG [M:0;83f56b55f2af:44845 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ccf4c03dea5449b7bae341c89bfa097d is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731576766482/Put/seqid=0 2024-11-14T09:33:43,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741880_1056 (size=7091) 2024-11-14T09:33:43,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741880_1056 (size=7091) 2024-11-14T09:33:43,032 INFO [M:0;83f56b55f2af:44845 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ccf4c03dea5449b7bae341c89bfa097d 2024-11-14T09:33:43,036 INFO [M:0;83f56b55f2af:44845 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ccf4c03dea5449b7bae341c89bfa097d 2024-11-14T09:33:43,050 DEBUG [M:0;83f56b55f2af:44845 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aaf622278b61469e91b4acf65242708d is 69, key is 83f56b55f2af,37631,1731576765291/rs:state/1731576765524/Put/seqid=0 2024-11-14T09:33:43,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741881_1057 (size=5156) 2024-11-14T09:33:43,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741881_1057 (size=5156) 2024-11-14T09:33:43,055 INFO [M:0;83f56b55f2af:44845 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aaf622278b61469e91b4acf65242708d 2024-11-14T09:33:43,074 DEBUG [M:0;83f56b55f2af:44845 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bf9eb6fd826248fab0977f16dd91cd0f is 52, key is load_balancer_on/state:d/1731576766114/Put/seqid=0 2024-11-14T09:33:43,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:33:43,078 INFO [RS:0;83f56b55f2af:37631 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:33:43,078 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37631-0x10115d285be0001, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:33:43,078 INFO [RS:0;83f56b55f2af:37631 {}] regionserver.HRegionServer(1031): Exiting; stopping=83f56b55f2af,37631,1731576765291; zookeeper connection closed. 
2024-11-14T09:33:43,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741882_1058 (size=5056) 2024-11-14T09:33:43,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741882_1058 (size=5056) 2024-11-14T09:33:43,078 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@238a5fea {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@238a5fea 2024-11-14T09:33:43,078 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T09:33:43,079 INFO [M:0;83f56b55f2af:44845 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bf9eb6fd826248fab0977f16dd91cd0f 2024-11-14T09:33:43,083 DEBUG [M:0;83f56b55f2af:44845 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/30a46d186faa487d8e654f084131b7aa as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/30a46d186faa487d8e654f084131b7aa 2024-11-14T09:33:43,087 INFO [M:0;83f56b55f2af:44845 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/30a46d186faa487d8e654f084131b7aa, entries=8, sequenceid=125, filesize=5.5 K 2024-11-14T09:33:43,088 DEBUG [M:0;83f56b55f2af:44845 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ccf4c03dea5449b7bae341c89bfa097d as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ccf4c03dea5449b7bae341c89bfa097d 2024-11-14T09:33:43,092 INFO [M:0;83f56b55f2af:44845 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ccf4c03dea5449b7bae341c89bfa097d 2024-11-14T09:33:43,092 INFO [M:0;83f56b55f2af:44845 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ccf4c03dea5449b7bae341c89bfa097d, entries=13, sequenceid=125, filesize=6.9 K 2024-11-14T09:33:43,093 DEBUG [M:0;83f56b55f2af:44845 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/aaf622278b61469e91b4acf65242708d as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aaf622278b61469e91b4acf65242708d 2024-11-14T09:33:43,097 INFO [M:0;83f56b55f2af:44845 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/aaf622278b61469e91b4acf65242708d, entries=1, sequenceid=125, filesize=5.0 K 2024-11-14T09:33:43,097 DEBUG [M:0;83f56b55f2af:44845 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/bf9eb6fd826248fab0977f16dd91cd0f as hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bf9eb6fd826248fab0977f16dd91cd0f 2024-11-14T09:33:43,101 INFO [M:0;83f56b55f2af:44845 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45921/user/jenkins/test-data/98fcafbf-283a-2140-fd81-a993419368de/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/bf9eb6fd826248fab0977f16dd91cd0f, entries=1, sequenceid=125, filesize=4.9 K 2024-11-14T09:33:43,102 INFO [M:0;83f56b55f2af:44845 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=125, compaction requested=false 2024-11-14T09:33:43,103 INFO [M:0;83f56b55f2af:44845 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:33:43,103 DEBUG [M:0;83f56b55f2af:44845 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731576822987Disabling compacts and flushes for region at 1731576822987Disabling writes for close at 1731576822987Obtaining lock to block concurrent updates at 1731576822987Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731576822987Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1731576822987Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731576822988 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731576822988Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731576823002 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731576823002Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731576823011 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731576823026 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731576823026Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731576823036 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731576823050 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731576823050Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731576823060 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731576823074 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731576823074Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d4041f9: reopening flushed file at 1731576823082 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6aa16be9: reopening flushed file at 1731576823087 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d100c67: reopening flushed file at 1731576823092 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d0b391c: reopening flushed file at 1731576823097 (+5 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=125, compaction requested=false at 1731576823102 (+5 ms)Writing region close event to WAL at 1731576823103 (+1 ms)Closed at 1731576823103 2024-11-14T09:33:43,103 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:43,103 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:43,104 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:43,104 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:43,104 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:43,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40913 is added to blk_1073741830_1006 (size=61332) 2024-11-14T09:33:43,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39945 is added to blk_1073741830_1006 (size=61332) 2024-11-14T09:33:43,106 INFO [M:0;83f56b55f2af:44845 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T09:33:43,106 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T09:33:43,106 INFO [M:0;83f56b55f2af:44845 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44845 2024-11-14T09:33:43,106 INFO [M:0;83f56b55f2af:44845 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:33:43,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:33:43,209 INFO [M:0;83f56b55f2af:44845 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:33:43,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44845-0x10115d285be0000, quorum=127.0.0.1:52182, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:33:43,212 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@771d3856{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:33:43,212 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1fdd1c8b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:33:43,213 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:33:43,213 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58157e93{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:33:43,213 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d35d031{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/hadoop.log.dir/,STOPPED} 2024-11-14T09:33:43,214 WARN [BP-1299273729-172.17.0.2-1731576764523 heartbeating to localhost/127.0.0.1:45921 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:33:43,215 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:33:43,215 WARN [BP-1299273729-172.17.0.2-1731576764523 heartbeating to localhost/127.0.0.1:45921 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1299273729-172.17.0.2-1731576764523 (Datanode Uuid 43daac12-c975-4a54-8ae8-e39dbc73a6f7) service to localhost/127.0.0.1:45921 2024-11-14T09:33:43,215 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:33:43,215 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/cluster_b2af9291-0691-383f-0a31-3dfb60af2861/data/data3/current/BP-1299273729-172.17.0.2-1731576764523 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:33:43,215 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/cluster_b2af9291-0691-383f-0a31-3dfb60af2861/data/data4/current/BP-1299273729-172.17.0.2-1731576764523 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:33:43,215 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:33:43,217 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@44ec820f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:33:43,218 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13dbd1c9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:33:43,218 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:33:43,218 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@324063a7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:33:43,218 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29d3ea34{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/hadoop.log.dir/,STOPPED} 2024-11-14T09:33:43,219 WARN [BP-1299273729-172.17.0.2-1731576764523 heartbeating to localhost/127.0.0.1:45921 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:33:43,219 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:33:43,219 WARN [BP-1299273729-172.17.0.2-1731576764523 heartbeating to localhost/127.0.0.1:45921 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1299273729-172.17.0.2-1731576764523 (Datanode Uuid fa0404ab-2878-4db9-98f9-58922f43aaea) service to localhost/127.0.0.1:45921 2024-11-14T09:33:43,220 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:33:43,220 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/cluster_b2af9291-0691-383f-0a31-3dfb60af2861/data/data1/current/BP-1299273729-172.17.0.2-1731576764523 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:33:43,220 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/cluster_b2af9291-0691-383f-0a31-3dfb60af2861/data/data2/current/BP-1299273729-172.17.0.2-1731576764523 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:33:43,220 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:33:43,226 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@57cd8a13{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:33:43,227 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5eb11345{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:33:43,227 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:33:43,227 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2877d055{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:33:43,227 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@162d5848{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/hadoop.log.dir/,STOPPED} 2024-11-14T09:33:43,234 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T09:33:43,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T09:33:43,273 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 206) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:45921 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:45921 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45921 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45921 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:45921 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:45921 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45921 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45921 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=512 (was 485) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=27 (was 41), ProcessCount=11 (was 11), AvailableMemoryMB=6470 (was 6503) 2024-11-14T09:33:43,281 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=512, MaxFileDescriptor=1048576, SystemLoadAverage=27, ProcessCount=11, AvailableMemoryMB=6469 2024-11-14T09:33:43,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T09:33:43,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/hadoop.log.dir so I do NOT create it in target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d 2024-11-14T09:33:43,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/960e219b-11d8-1120-b96a-489c35f8c7b6/hadoop.tmp.dir so I do NOT create it in target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d 2024-11-14T09:33:43,281 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/cluster_d45ae119-9bce-d3ef-f172-614ffa505192, deleteOnExit=true 2024-11-14T09:33:43,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T09:33:43,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/test.cache.data in system properties and HBase conf 2024-11-14T09:33:43,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T09:33:43,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/hadoop.log.dir in system properties and HBase conf 2024-11-14T09:33:43,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T09:33:43,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T09:33:43,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T09:33:43,282 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-14T09:33:43,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:33:43,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:33:43,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T09:33:43,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:33:43,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T09:33:43,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T09:33:43,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:33:43,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:33:43,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T09:33:43,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/nfs.dump.dir in system properties and HBase conf 2024-11-14T09:33:43,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/java.io.tmpdir in system properties and HBase conf 2024-11-14T09:33:43,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:33:43,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T09:33:43,282 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T09:33:43,295 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:33:43,355 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:33:43,359 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:33:43,360 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:33:43,360 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:33:43,360 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:33:43,362 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:33:43,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b0b2b30{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:33:43,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@622c554c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:33:43,477 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6ce65df0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/java.io.tmpdir/jetty-localhost-43585-hadoop-hdfs-3_4_1-tests_jar-_-any-9910518225022471917/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:33:43,478 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5859719{HTTP/1.1, (http/1.1)}{localhost:43585} 2024-11-14T09:33:43,478 INFO [Time-limited test {}] server.Server(415): Started @297664ms 2024-11-14T09:33:43,490 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:33:43,537 INFO [regionserver/83f56b55f2af:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:33:43,541 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:33:43,543 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:33:43,544 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:33:43,544 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:33:43,544 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:33:43,544 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10993583{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:33:43,545 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44209567{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:33:43,658 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@67e3c4de{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/java.io.tmpdir/jetty-localhost-46767-hadoop-hdfs-3_4_1-tests_jar-_-any-16795667420417130415/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:33:43,659 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e22b5d8{HTTP/1.1, (http/1.1)}{localhost:46767} 2024-11-14T09:33:43,659 INFO [Time-limited test {}] server.Server(415): Started @297845ms 2024-11-14T09:33:43,660 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:33:43,689 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:33:43,691 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:33:43,692 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:33:43,692 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:33:43,692 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:33:43,692 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@694aa09b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:33:43,693 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69ffafd5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:33:43,752 WARN [Thread-2486 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/cluster_d45ae119-9bce-d3ef-f172-614ffa505192/data/data1/current/BP-1950380783-172.17.0.2-1731576823300/current, will proceed with Du for space computation calculation, 2024-11-14T09:33:43,752 WARN [Thread-2487 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/cluster_d45ae119-9bce-d3ef-f172-614ffa505192/data/data2/current/BP-1950380783-172.17.0.2-1731576823300/current, will proceed with Du for space computation calculation, 2024-11-14T09:33:43,768 WARN [Thread-2465 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:33:43,771 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x777cec71a813b4bf with lease ID 0xa0bc13b6d23f8693: Processing first storage report for DS-f68fc25c-88c8-4f17-8e57-b6352e4b2c69 from datanode DatanodeRegistration(127.0.0.1:46029, datanodeUuid=d6c1c73d-9a80-4ea9-9e58-bf836d65eaaf, infoPort=39179, infoSecurePort=0, ipcPort=38969, storageInfo=lv=-57;cid=testClusterID;nsid=444717903;c=1731576823300) 2024-11-14T09:33:43,771 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x777cec71a813b4bf with lease ID 0xa0bc13b6d23f8693: from storage DS-f68fc25c-88c8-4f17-8e57-b6352e4b2c69 node DatanodeRegistration(127.0.0.1:46029, datanodeUuid=d6c1c73d-9a80-4ea9-9e58-bf836d65eaaf, infoPort=39179, infoSecurePort=0, ipcPort=38969, storageInfo=lv=-57;cid=testClusterID;nsid=444717903;c=1731576823300), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:33:43,771 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x777cec71a813b4bf with lease ID 0xa0bc13b6d23f8693: Processing first storage report for DS-0b04c8ed-fa81-4659-80e3-766e94a74b0a from datanode DatanodeRegistration(127.0.0.1:46029, datanodeUuid=d6c1c73d-9a80-4ea9-9e58-bf836d65eaaf, infoPort=39179, infoSecurePort=0, ipcPort=38969, storageInfo=lv=-57;cid=testClusterID;nsid=444717903;c=1731576823300) 2024-11-14T09:33:43,771 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x777cec71a813b4bf with lease ID 0xa0bc13b6d23f8693: from storage DS-0b04c8ed-fa81-4659-80e3-766e94a74b0a node DatanodeRegistration(127.0.0.1:46029, datanodeUuid=d6c1c73d-9a80-4ea9-9e58-bf836d65eaaf, infoPort=39179, infoSecurePort=0, ipcPort=38969, storageInfo=lv=-57;cid=testClusterID;nsid=444717903;c=1731576823300), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:33:43,813 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26b7b1af{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/java.io.tmpdir/jetty-localhost-44029-hadoop-hdfs-3_4_1-tests_jar-_-any-14668913322534982969/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:33:43,813 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64daf748{HTTP/1.1, (http/1.1)}{localhost:44029} 2024-11-14T09:33:43,813 INFO [Time-limited test {}] server.Server(415): Started @298000ms 2024-11-14T09:33:43,815 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
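The mini-cluster startup recorded above (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}) is driven through the HBase testing utility, which brings up an embedded HDFS, ZooKeeper, master and region server for the test and tears them down afterwards. The following is a minimal sketch of that pattern, assuming the HBase 3.x test API (HBaseTestingUtil / StartMiniClusterOption) named in the log; it is illustrative only and is not the source of TestLogRolling.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    // Mirror the logged option: 1 master, 1 region server, 2 datanodes, 1 ZooKeeper server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster(option);   // starts mini DFS, ZooKeeper, HMaster and a region server
    try {
      // test logic would run here, e.g. against util.getConnection()
    } finally {
      util.shutdownMiniCluster();    // teardown; ResourceChecker then compares thread/FD counts
    }
  }
}

The "Potentially hanging thread" listings and the "Thread=... OpenFileDescriptor=... (was ...)" counters earlier in the log are hbase.ResourceChecker's before/after comparison around exactly this start/stop cycle.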
2024-11-14T09:33:43,939 WARN [Thread-2513 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/cluster_d45ae119-9bce-d3ef-f172-614ffa505192/data/data4/current/BP-1950380783-172.17.0.2-1731576823300/current, will proceed with Du for space computation calculation, 2024-11-14T09:33:43,939 WARN [Thread-2512 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/cluster_d45ae119-9bce-d3ef-f172-614ffa505192/data/data3/current/BP-1950380783-172.17.0.2-1731576823300/current, will proceed with Du for space computation calculation, 2024-11-14T09:33:43,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:43,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:43,955 WARN [Thread-2501 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:33:43,957 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc432f250c4e0c74b with lease ID 0xa0bc13b6d23f8694: Processing first storage report for DS-a06c706c-7c32-432e-a567-6c29e0ece958 from datanode DatanodeRegistration(127.0.0.1:36151, datanodeUuid=291f43d3-6b29-47c3-baee-024dac255524, infoPort=45069, infoSecurePort=0, ipcPort=40039, storageInfo=lv=-57;cid=testClusterID;nsid=444717903;c=1731576823300) 2024-11-14T09:33:43,958 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc432f250c4e0c74b with lease ID 0xa0bc13b6d23f8694: from storage DS-a06c706c-7c32-432e-a567-6c29e0ece958 node DatanodeRegistration(127.0.0.1:36151, datanodeUuid=291f43d3-6b29-47c3-baee-024dac255524, infoPort=45069, infoSecurePort=0, ipcPort=40039, storageInfo=lv=-57;cid=testClusterID;nsid=444717903;c=1731576823300), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:33:43,958 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc432f250c4e0c74b with lease ID 0xa0bc13b6d23f8694: Processing first storage report for DS-b4d0c955-d2b6-4207-90f5-14811bae809d from datanode DatanodeRegistration(127.0.0.1:36151, datanodeUuid=291f43d3-6b29-47c3-baee-024dac255524, infoPort=45069, infoSecurePort=0, ipcPort=40039, storageInfo=lv=-57;cid=testClusterID;nsid=444717903;c=1731576823300) 2024-11-14T09:33:43,958 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc432f250c4e0c74b with lease ID 0xa0bc13b6d23f8694: from storage DS-b4d0c955-d2b6-4207-90f5-14811bae809d node DatanodeRegistration(127.0.0.1:36151, datanodeUuid=291f43d3-6b29-47c3-baee-024dac255524, infoPort=45069, infoSecurePort=0, ipcPort=40039, storageInfo=lv=-57;cid=testClusterID;nsid=444717903;c=1731576823300), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:33:44,039 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d 2024-11-14T09:33:44,042 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/cluster_d45ae119-9bce-d3ef-f172-614ffa505192/zookeeper_0, clientPort=56788, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/cluster_d45ae119-9bce-d3ef-f172-614ffa505192/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/cluster_d45ae119-9bce-d3ef-f172-614ffa505192/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T09:33:44,042 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56788 2024-11-14T09:33:44,043 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:33:44,044 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:33:44,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:33:44,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:33:44,053 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6 with version=8 2024-11-14T09:33:44,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33501/user/jenkins/test-data/06faac0d-7553-fd0a-246a-c1c07592c22a/hbase-staging 2024-11-14T09:33:44,055 INFO [Time-limited test {}] client.ConnectionUtils(128): master/83f56b55f2af:0 server-side Connection retries=45 2024-11-14T09:33:44,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:33:44,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:33:44,056 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:33:44,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:33:44,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:33:44,056 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T09:33:44,056 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:33:44,057 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37969 2024-11-14T09:33:44,058 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37969 connecting to ZooKeeper ensemble=127.0.0.1:56788 2024-11-14T09:33:44,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:379690x0, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:33:44,068 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37969-0x10115d36b790000 connected 2024-11-14T09:33:44,085 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:33:44,086 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:33:44,088 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:33:44,088 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6, hbase.cluster.distributed=false 2024-11-14T09:33:44,090 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:33:44,091 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37969 2024-11-14T09:33:44,091 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37969 2024-11-14T09:33:44,091 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37969 2024-11-14T09:33:44,091 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37969 2024-11-14T09:33:44,094 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37969 2024-11-14T09:33:44,108 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/83f56b55f2af:0 server-side Connection retries=45 2024-11-14T09:33:44,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:33:44,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:33:44,108 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:33:44,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:33:44,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:33:44,108 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T09:33:44,108 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:33:44,109 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35041 2024-11-14T09:33:44,110 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35041 connecting to ZooKeeper ensemble=127.0.0.1:56788 2024-11-14T09:33:44,110 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:33:44,112 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:33:44,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:350410x0, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:33:44,116 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:33:44,116 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35041-0x10115d36b790001 connected 2024-11-14T09:33:44,116 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T09:33:44,117 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T09:33:44,117 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T09:33:44,118 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:33:44,118 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35041 2024-11-14T09:33:44,119 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35041 2024-11-14T09:33:44,119 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35041 2024-11-14T09:33:44,119 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35041 2024-11-14T09:33:44,119 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35041 2024-11-14T09:33:44,130 DEBUG [M:0;83f56b55f2af:37969 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;83f56b55f2af:37969 2024-11-14T09:33:44,131 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/83f56b55f2af,37969,1731576824055 2024-11-14T09:33:44,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:33:44,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:33:44,133 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/83f56b55f2af,37969,1731576824055 2024-11-14T09:33:44,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T09:33:44,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:44,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:44,136 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T09:33:44,136 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/83f56b55f2af,37969,1731576824055 from backup master directory 2024-11-14T09:33:44,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/83f56b55f2af,37969,1731576824055 2024-11-14T09:33:44,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:33:44,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:33:44,138 WARN [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
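The ZKWatcher and ZKUtil records above show the master and region server placing watches on znodes such as /hbase/master, /hbase/running and /hbase/backup-masters, in several cases on znodes that do not exist yet, and then reacting to the NodeCreated/NodeDeleted/NodeChildrenChanged events that follow. A minimal illustration of that watch-on-missing-znode pattern with the plain org.apache.zookeeper client (not HBase's internal ZKUtil) looks roughly like this; the ensemble address and znode path are taken from the log, everything else is assumed for the example.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Default watcher: prints events much like the ZKWatcher lines in the log.
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());

    ZooKeeper zk = new ZooKeeper("127.0.0.1:56788", 30000, watcher);

    // exists() with watch=true registers the watch even when the znode is absent,
    // so a later NodeCreated event fires once the active master creates /hbase/master.
    if (zk.exists("/hbase/master", true) == null) {
      System.out.println("Set watcher on znode that does not yet exist, /hbase/master");
    }
  }
}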
2024-11-14T09:33:44,138 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=83f56b55f2af,37969,1731576824055 2024-11-14T09:33:44,141 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/hbase.id] with ID: 0bec415a-c62c-4d4f-ac87-50f8400a818b 2024-11-14T09:33:44,141 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/.tmp/hbase.id 2024-11-14T09:33:44,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:33:44,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:33:44,147 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/.tmp/hbase.id]:[hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/hbase.id] 2024-11-14T09:33:44,156 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:33:44,156 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T09:33:44,157 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
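The cluster-ID records above follow the usual HDFS write-then-rename idiom: the ID is first written to .tmp/hbase.id and only afterwards moved to its final hbase.id location, so a reader never observes a half-written file. A sketch of that idiom with the public Hadoop FileSystem API follows; the paths and ID value are copied from the log, while the class and its structure are assumptions for illustration.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:45383");   // namenode address from the log

    FileSystem fs = FileSystem.get(conf);
    Path rootDir = new Path("/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6");
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path dst = new Path(rootDir, "hbase.id");

    // Write the cluster ID to the temporary location first ...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("0bec415a-c62c-4d4f-ac87-50f8400a818b".getBytes(StandardCharsets.UTF_8));
    }
    // ... then rename it into place so readers only ever see the complete file.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}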
2024-11-14T09:33:44,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:44,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:44,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:33:44,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:33:44,166 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:33:44,167 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T09:33:44,167 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:33:44,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:33:44,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:33:44,174 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store 2024-11-14T09:33:44,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:33:44,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:33:44,180 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:33:44,180 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:33:44,180 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:33:44,180 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:33:44,180 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:33:44,180 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:33:44,180 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
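The master:store descriptor printed above (families info, proc, rs and state, with ROW_INDEX_V1 encoding, a ROWCOL bloom filter, three versions, in-memory caching and 8 KB blocks on info, and single-version ROW-bloom families otherwise) is built internally by MasterRegion. An abridged sketch of how an equivalent schema could be declared with the public descriptor builders follows; it covers only the attributes called out above and is not the MasterRegion code itself.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreSchemaSketch {
  static TableDescriptor masterStoreLike() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        // 'info' as logged: VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc', 'rs', 'state' as logged: single version, ROW bloom, default 64 KB blocks
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setBloomFilterType(BloomType.ROW).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs"))
            .setBloomFilterType(BloomType.ROW).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state"))
            .setBloomFilterType(BloomType.ROW).build())
        .build();
  }
}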
2024-11-14T09:33:44,180 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731576824180Disabling compacts and flushes for region at 1731576824180Disabling writes for close at 1731576824180Writing region close event to WAL at 1731576824180Closed at 1731576824180 2024-11-14T09:33:44,181 WARN [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/.initializing 2024-11-14T09:33:44,181 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/WALs/83f56b55f2af,37969,1731576824055 2024-11-14T09:33:44,183 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C37969%2C1731576824055, suffix=, logDir=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/WALs/83f56b55f2af,37969,1731576824055, archiveDir=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/oldWALs, maxLogs=10 2024-11-14T09:33:44,183 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C37969%2C1731576824055.1731576824183 2024-11-14T09:33:44,187 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/WALs/83f56b55f2af,37969,1731576824055/83f56b55f2af%2C37969%2C1731576824055.1731576824183 2024-11-14T09:33:44,187 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39179:39179),(127.0.0.1/127.0.0.1:45069:45069)] 2024-11-14T09:33:44,188 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:33:44,188 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:33:44,188 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:33:44,188 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:33:44,189 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:33:44,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T09:33:44,191 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:44,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:33:44,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:33:44,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T09:33:44,192 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:44,192 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:33:44,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:33:44,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T09:33:44,193 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:44,194 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:33:44,194 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:33:44,195 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T09:33:44,195 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:44,195 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:33:44,195 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:33:44,196 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:33:44,196 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:33:44,197 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:33:44,197 DEBUG [master/83f56b55f2af:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:33:44,197 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T09:33:44,198 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:33:44,200 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:33:44,200 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=836266, jitterRate=0.0633673369884491}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T09:33:44,201 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731576824188Initializing all the Stores at 1731576824189 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576824189Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576824189Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576824189Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576824189Cleaning up temporary data from old regions at 1731576824197 (+8 ms)Region opened successfully at 1731576824201 (+4 ms) 2024-11-14T09:33:44,202 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T09:33:44,205 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@620cf527, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83f56b55f2af/172.17.0.2:0 2024-11-14T09:33:44,206 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T09:33:44,206 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T09:33:44,206 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T09:33:44,206 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T09:33:44,206 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T09:33:44,207 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T09:33:44,207 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T09:33:44,209 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T09:33:44,209 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T09:33:44,211 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T09:33:44,211 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T09:33:44,211 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T09:33:44,214 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T09:33:44,214 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T09:33:44,215 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T09:33:44,216 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T09:33:44,217 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T09:33:44,218 DEBUG 
[master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T09:33:44,219 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T09:33:44,221 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T09:33:44,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:33:44,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:33:44,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:44,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:44,224 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=83f56b55f2af,37969,1731576824055, sessionid=0x10115d36b790000, setting cluster-up flag (Was=false) 2024-11-14T09:33:44,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:44,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:44,232 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T09:33:44,233 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=83f56b55f2af,37969,1731576824055 2024-11-14T09:33:44,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:44,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:44,241 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T09:33:44,242 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=83f56b55f2af,37969,1731576824055 2024-11-14T09:33:44,243 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T09:33:44,244 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T09:33:44,244 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T09:33:44,245 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T09:33:44,245 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 83f56b55f2af,37969,1731576824055 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T09:33:44,246 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:33:44,246 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:33:44,246 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:33:44,246 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/83f56b55f2af:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:33:44,246 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/83f56b55f2af:0, corePoolSize=10, maxPoolSize=10 2024-11-14T09:33:44,246 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:33:44,246 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/83f56b55f2af:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:33:44,246 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/83f56b55f2af:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T09:33:44,247 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731576854247 2024-11-14T09:33:44,247 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T09:33:44,247 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T09:33:44,247 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T09:33:44,247 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T09:33:44,247 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T09:33:44,247 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T09:33:44,247 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:33:44,247 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T09:33:44,247 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
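The cleaner entries above register periodic chores such as LogsCleaner with period=600000 ms. As a minimal sketch only, the same fixed-period scheduling contract can be expressed with the plain JDK executor below; this is a stand-in for HBase's ChoreService, not its implementation, and the cleanup body is a placeholder.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class LogCleanerChoreSketch {
  public static void main(String[] args) {
    // JDK stand-in for a ScheduledChore with period=600000 ms. HBase's
    // ChoreService adds missed-start accounting and cancellation on top of
    // the same periodic-execution idea sketched here.
    ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
    Runnable logsCleaner =
        () -> System.out.println("scan oldWALs and delete expired files (placeholder)");
    chorePool.scheduleAtFixedRate(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);
  }
}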
2024-11-14T09:33:44,248 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T09:33:44,248 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T09:33:44,248 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T09:33:44,248 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T09:33:44,248 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T09:33:44,248 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:44,248 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T09:33:44,249 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576824248,5,FailOnTimeoutGroup] 2024-11-14T09:33:44,249 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576824249,5,FailOnTimeoutGroup] 2024-11-14T09:33:44,249 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:33:44,249 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T09:33:44,249 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T09:33:44,249 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T09:33:44,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:33:44,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:33:44,254 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T09:33:44,254 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6 2024-11-14T09:33:44,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:33:44,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:33:44,260 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:33:44,261 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:33:44,262 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:33:44,262 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:44,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:33:44,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:33:44,263 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:33:44,263 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:44,263 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:33:44,263 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:33:44,264 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:33:44,264 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:44,265 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:33:44,265 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:33:44,265 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:33:44,265 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:44,266 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:33:44,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:33:44,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/data/hbase/meta/1588230740 2024-11-14T09:33:44,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/data/hbase/meta/1588230740 2024-11-14T09:33:44,268 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:33:44,268 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:33:44,268 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T09:33:44,269 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:33:44,270 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:33:44,271 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=831112, jitterRate=0.05681423842906952}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:33:44,271 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731576824260Initializing all the Stores at 1731576824260Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576824260Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576824260Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576824260Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576824260Cleaning up temporary data from old regions at 1731576824268 (+8 ms)Region opened successfully at 1731576824271 (+3 ms) 2024-11-14T09:33:44,271 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:33:44,271 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:33:44,271 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:33:44,271 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:33:44,271 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:33:44,272 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:33:44,272 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731576824271Disabling compacts and flushes for region at 
1731576824271Disabling writes for close at 1731576824271Writing region close event to WAL at 1731576824272 (+1 ms)Closed at 1731576824272 2024-11-14T09:33:44,273 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:33:44,273 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T09:33:44,273 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T09:33:44,274 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:33:44,274 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T09:33:44,321 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.HRegionServer(746): ClusterId : 0bec415a-c62c-4d4f-ac87-50f8400a818b 2024-11-14T09:33:44,321 DEBUG [RS:0;83f56b55f2af:35041 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T09:33:44,323 DEBUG [RS:0;83f56b55f2af:35041 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T09:33:44,323 DEBUG [RS:0;83f56b55f2af:35041 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T09:33:44,327 DEBUG [RS:0;83f56b55f2af:35041 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T09:33:44,327 DEBUG [RS:0;83f56b55f2af:35041 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a2024c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=83f56b55f2af/172.17.0.2:0 2024-11-14T09:33:44,339 DEBUG [RS:0;83f56b55f2af:35041 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;83f56b55f2af:35041 2024-11-14T09:33:44,339 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T09:33:44,339 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T09:33:44,339 DEBUG [RS:0;83f56b55f2af:35041 {}] regionserver.HRegionServer(832): About to register with Master. 
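The FlushLargeStoresPolicy entries above report that, with no hbase.hregion.percolumnfamilyflush.size.lower.bound configured, the per-family flush lower bound falls back to the region memstore flush size divided by the number of families. The short sketch below only reproduces that arithmetic with the values logged in this run (the 64 MB meta flush size is inferred from the logged 16 MB result and 4 families); the helper name is illustrative.

public class FlushLowerBoundSketch {
  /** Fallback used when no explicit per-family lower bound is configured. */
  static long perFamilyLowerBound(long memStoreFlushSize, int numFamilies) {
    return memStoreFlushSize / numFamilies;
  }

  public static void main(String[] args) {
    // master:store -> flushSize=134217728 (128 MB), 4 families -> 33554432 (32 MB),
    // matching the "(32.0 M)" fallback and flushSizeLowerBound=33554432 above.
    System.out.println(perFamilyLowerBound(134_217_728L, 4));
    // hbase:meta -> inferred 67108864 (64 MB), 4 families -> 16777216 (16 MB),
    // matching FlushLargeStoresPolicy{flushSizeLowerBound=16777216}.
    System.out.println(perFamilyLowerBound(67_108_864L, 4));
  }
}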
2024-11-14T09:33:44,339 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.HRegionServer(2659): reportForDuty to master=83f56b55f2af,37969,1731576824055 with port=35041, startcode=1731576824108 2024-11-14T09:33:44,340 DEBUG [RS:0;83f56b55f2af:35041 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T09:33:44,342 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53383, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T09:33:44,342 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37969 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 83f56b55f2af,35041,1731576824108 2024-11-14T09:33:44,342 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37969 {}] master.ServerManager(517): Registering regionserver=83f56b55f2af,35041,1731576824108 2024-11-14T09:33:44,343 DEBUG [RS:0;83f56b55f2af:35041 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6 2024-11-14T09:33:44,344 DEBUG [RS:0;83f56b55f2af:35041 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45383 2024-11-14T09:33:44,344 DEBUG [RS:0;83f56b55f2af:35041 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T09:33:44,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:33:44,346 DEBUG [RS:0;83f56b55f2af:35041 {}] zookeeper.ZKUtil(111): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/83f56b55f2af,35041,1731576824108 2024-11-14T09:33:44,346 WARN [RS:0;83f56b55f2af:35041 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:33:44,346 INFO [RS:0;83f56b55f2af:35041 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:33:44,346 DEBUG [RS:0;83f56b55f2af:35041 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/WALs/83f56b55f2af,35041,1731576824108 2024-11-14T09:33:44,346 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [83f56b55f2af,35041,1731576824108] 2024-11-14T09:33:44,349 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T09:33:44,350 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T09:33:44,350 INFO [RS:0;83f56b55f2af:35041 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:33:44,350 INFO [RS:0;83f56b55f2af:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
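The registration entries above show the region server publishing itself under /hbase/rs and the master's RegionServerTracker reacting to the resulting NodeChildrenChanged event. As a hedged sketch with the plain Apache ZooKeeper client (not HBase's ZKUtil wrappers), the same ephemeral-node-plus-children-watch pattern looks roughly like the following; the quorum address, server name and the assumption that the parent znodes already exist are placeholders.

import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class RsTrackerSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder quorum; the test run above used 127.0.0.1:56788.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

    // Region server side: publish liveness as an ephemeral znode under /hbase/rs
    // (assumes /hbase and /hbase/rs already exist).
    zk.create("/hbase/rs/example-rs,16020,0", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // Master side: watch the /hbase/rs children; a NodeChildrenChanged event
    // fires when a server registers or its ephemeral node expires.
    Watcher childrenWatcher = (WatchedEvent event) ->
        System.out.println("rs list changed: " + event.getType());
    List<String> servers = zk.getChildren("/hbase/rs", childrenWatcher);
    System.out.println("live region servers: " + servers);
  }
}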
2024-11-14T09:33:44,351 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T09:33:44,351 INFO [RS:0;83f56b55f2af:35041 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T09:33:44,351 INFO [RS:0;83f56b55f2af:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T09:33:44,351 DEBUG [RS:0;83f56b55f2af:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:33:44,351 DEBUG [RS:0;83f56b55f2af:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:33:44,351 DEBUG [RS:0;83f56b55f2af:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:33:44,351 DEBUG [RS:0;83f56b55f2af:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:33:44,351 DEBUG [RS:0;83f56b55f2af:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:33:44,351 DEBUG [RS:0;83f56b55f2af:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/83f56b55f2af:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:33:44,352 DEBUG [RS:0;83f56b55f2af:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:33:44,352 DEBUG [RS:0;83f56b55f2af:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:33:44,352 DEBUG [RS:0;83f56b55f2af:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:33:44,352 DEBUG [RS:0;83f56b55f2af:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:33:44,352 DEBUG [RS:0;83f56b55f2af:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:33:44,352 DEBUG [RS:0;83f56b55f2af:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/83f56b55f2af:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:33:44,352 DEBUG [RS:0;83f56b55f2af:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/83f56b55f2af:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:33:44,352 DEBUG [RS:0;83f56b55f2af:35041 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/83f56b55f2af:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:33:44,352 INFO [RS:0;83f56b55f2af:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-14T09:33:44,352 INFO [RS:0;83f56b55f2af:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:33:44,352 INFO [RS:0;83f56b55f2af:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:33:44,352 INFO [RS:0;83f56b55f2af:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T09:33:44,352 INFO [RS:0;83f56b55f2af:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T09:33:44,352 INFO [RS:0;83f56b55f2af:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,35041,1731576824108-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:33:44,366 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T09:33:44,366 INFO [RS:0;83f56b55f2af:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,35041,1731576824108-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:33:44,366 INFO [RS:0;83f56b55f2af:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:33:44,366 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.Replication(171): 83f56b55f2af,35041,1731576824108 started 2024-11-14T09:33:44,379 INFO [RS:0;83f56b55f2af:35041 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:33:44,379 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.HRegionServer(1482): Serving as 83f56b55f2af,35041,1731576824108, RpcServer on 83f56b55f2af/172.17.0.2:35041, sessionid=0x10115d36b790001 2024-11-14T09:33:44,379 DEBUG [RS:0;83f56b55f2af:35041 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T09:33:44,379 DEBUG [RS:0;83f56b55f2af:35041 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 83f56b55f2af,35041,1731576824108 2024-11-14T09:33:44,379 DEBUG [RS:0;83f56b55f2af:35041 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83f56b55f2af,35041,1731576824108' 2024-11-14T09:33:44,379 DEBUG [RS:0;83f56b55f2af:35041 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T09:33:44,380 DEBUG [RS:0;83f56b55f2af:35041 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T09:33:44,380 DEBUG [RS:0;83f56b55f2af:35041 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T09:33:44,380 DEBUG [RS:0;83f56b55f2af:35041 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T09:33:44,380 DEBUG [RS:0;83f56b55f2af:35041 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 83f56b55f2af,35041,1731576824108 2024-11-14T09:33:44,380 DEBUG [RS:0;83f56b55f2af:35041 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '83f56b55f2af,35041,1731576824108' 2024-11-14T09:33:44,380 DEBUG [RS:0;83f56b55f2af:35041 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T09:33:44,381 DEBUG 
[RS:0;83f56b55f2af:35041 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T09:33:44,381 DEBUG [RS:0;83f56b55f2af:35041 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T09:33:44,381 INFO [RS:0;83f56b55f2af:35041 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T09:33:44,381 INFO [RS:0;83f56b55f2af:35041 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T09:33:44,425 WARN [83f56b55f2af:37969 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-14T09:33:44,483 INFO [RS:0;83f56b55f2af:35041 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C35041%2C1731576824108, suffix=, logDir=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/WALs/83f56b55f2af,35041,1731576824108, archiveDir=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/oldWALs, maxLogs=32 2024-11-14T09:33:44,483 INFO [RS:0;83f56b55f2af:35041 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C35041%2C1731576824108.1731576824483 2024-11-14T09:33:44,488 INFO [RS:0;83f56b55f2af:35041 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/WALs/83f56b55f2af,35041,1731576824108/83f56b55f2af%2C35041%2C1731576824108.1731576824483 2024-11-14T09:33:44,489 DEBUG [RS:0;83f56b55f2af:35041 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39179:39179),(127.0.0.1/127.0.0.1:45069:45069)] 2024-11-14T09:33:44,675 DEBUG [83f56b55f2af:37969 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T09:33:44,675 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=83f56b55f2af,35041,1731576824108 2024-11-14T09:33:44,677 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 83f56b55f2af,35041,1731576824108, state=OPENING 2024-11-14T09:33:44,678 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T09:33:44,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:44,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:44,680 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:33:44,680 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:33:44,680 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:33:44,680 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=83f56b55f2af,35041,1731576824108}] 2024-11-14T09:33:44,833 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T09:33:44,835 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49071, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T09:33:44,838 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T09:33:44,838 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:33:44,839 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=83f56b55f2af%2C35041%2C1731576824108.meta, suffix=.meta, logDir=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/WALs/83f56b55f2af,35041,1731576824108, archiveDir=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/oldWALs, maxLogs=32 2024-11-14T09:33:44,840 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 83f56b55f2af%2C35041%2C1731576824108.meta.1731576824840.meta 2024-11-14T09:33:44,845 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/WALs/83f56b55f2af,35041,1731576824108/83f56b55f2af%2C35041%2C1731576824108.meta.1731576824840.meta 2024-11-14T09:33:44,848 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39179:39179),(127.0.0.1/127.0.0.1:45069:45069)] 2024-11-14T09:33:44,852 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:33:44,853 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T09:33:44,853 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T09:33:44,853 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-14T09:33:44,853 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T09:33:44,853 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:33:44,853 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T09:33:44,853 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T09:33:44,854 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:33:44,855 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:33:44,855 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:44,855 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:33:44,855 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:33:44,856 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:33:44,856 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:44,856 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:33:44,856 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:33:44,857 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:33:44,857 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:44,857 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:33:44,857 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:33:44,858 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:33:44,858 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:33:44,858 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-14T09:33:44,858 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:33:44,859 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/data/hbase/meta/1588230740 2024-11-14T09:33:44,859 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/data/hbase/meta/1588230740 2024-11-14T09:33:44,860 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:33:44,860 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:33:44,861 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T09:33:44,862 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:33:44,862 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=811863, jitterRate=0.03233781456947327}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:33:44,862 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T09:33:44,863 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731576824853Writing region info on filesystem at 1731576824853Initializing all the Stores at 1731576824854 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576824854Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576824854Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731576824854Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731576824854Cleaning up temporary data from old regions at 1731576824860 (+6 ms)Running coprocessor post-open hooks at 1731576824862 (+2 ms)Region opened successfully at 1731576824863 (+1 ms) 2024-11-14T09:33:44,864 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731576824832 2024-11-14T09:33:44,866 DEBUG [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T09:33:44,866 INFO [RS_OPEN_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T09:33:44,866 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=83f56b55f2af,35041,1731576824108 2024-11-14T09:33:44,867 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 83f56b55f2af,35041,1731576824108, state=OPEN 2024-11-14T09:33:44,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:33:44,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:33:44,874 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=83f56b55f2af,35041,1731576824108 2024-11-14T09:33:44,874 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:33:44,874 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:33:44,876 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T09:33:44,876 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=83f56b55f2af,35041,1731576824108 in 194 msec 2024-11-14T09:33:44,878 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T09:33:44,878 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 603 msec 2024-11-14T09:33:44,879 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:33:44,879 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T09:33:44,880 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:33:44,880 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=83f56b55f2af,35041,1731576824108, seqNum=-1] 2024-11-14T09:33:44,880 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:33:44,881 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50761, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:33:44,885 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 641 msec 2024-11-14T09:33:44,885 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731576824885, completionTime=-1 2024-11-14T09:33:44,885 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T09:33:44,886 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-14T09:33:44,887 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-14T09:33:44,887 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731576884887 2024-11-14T09:33:44,887 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731576944887 2024-11-14T09:33:44,887 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-14T09:33:44,887 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,37969,1731576824055-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:33:44,887 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,37969,1731576824055-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:33:44,887 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,37969,1731576824055-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:33:44,888 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-83f56b55f2af:37969, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T09:33:44,888 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T09:33:44,888 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T09:33:44,889 DEBUG [master/83f56b55f2af:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T09:33:44,891 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.753sec 2024-11-14T09:33:44,891 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T09:33:44,891 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T09:33:44,891 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T09:33:44,891 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T09:33:44,891 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T09:33:44,891 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,37969,1731576824055-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:33:44,891 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,37969,1731576824055-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T09:33:44,893 DEBUG [master/83f56b55f2af:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T09:33:44,893 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T09:33:44,893 INFO [master/83f56b55f2af:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=83f56b55f2af,37969,1731576824055-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
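The chore registrations above all follow the same fixed period/unit schedule. A minimal, self-contained Java sketch of that scheduling pattern, using only java.util.concurrent (the chore name and period here are illustrative placeholders, not HBase's ChoreService API):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSchedulingSketch {
        public static void main(String[] args) throws InterruptedException {
            // Single-threaded scheduler standing in for a chore service.
            ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

            // A periodic task analogous to a ScheduledChore with period=1, unit=SECONDS.
            Runnable balancerChore = () ->
                System.out.println("BalancerChore tick at " + System.currentTimeMillis());

            // Fixed-rate scheduling: no initial delay, then one run every second.
            scheduler.scheduleAtFixedRate(balancerChore, 0, 1, TimeUnit.SECONDS);

            // Let a few ticks run, then stop the scheduler (mirrors chore cancellation on shutdown).
            Thread.sleep(3_000);
            scheduler.shutdownNow();
        }
    }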
2024-11-14T09:33:44,921 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c310f10, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:33:44,921 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 83f56b55f2af,37969,-1 for getting cluster id 2024-11-14T09:33:44,921 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T09:33:44,922 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0bec415a-c62c-4d4f-ac87-50f8400a818b' 2024-11-14T09:33:44,923 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T09:33:44,923 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0bec415a-c62c-4d4f-ac87-50f8400a818b" 2024-11-14T09:33:44,923 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39eda9d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:33:44,923 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [83f56b55f2af,37969,-1] 2024-11-14T09:33:44,923 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T09:33:44,923 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:33:44,924 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41784, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T09:33:44,925 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@161413b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:33:44,925 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:33:44,926 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=83f56b55f2af,35041,1731576824108, seqNum=-1] 2024-11-14T09:33:44,926 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:33:44,927 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43056, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:33:44,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=83f56b55f2af,37969,1731576824055 2024-11-14T09:33:44,929 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:33:44,931 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T09:33:44,931 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:33:44,933 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/WALs/test.com,8080,1, archiveDir=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/oldWALs, maxLogs=32 2024-11-14T09:33:44,933 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731576824933 2024-11-14T09:33:44,937 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/WALs/test.com,8080,1/test.com%2C8080%2C1.1731576824933 2024-11-14T09:33:44,938 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45069:45069),(127.0.0.1/127.0.0.1:39179:39179)] 2024-11-14T09:33:44,939 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731576824939 2024-11-14T09:33:44,943 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:44,943 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:44,943 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:44,943 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:44,943 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:44,943 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/WALs/test.com,8080,1/test.com%2C8080%2C1.1731576824933 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/WALs/test.com,8080,1/test.com%2C8080%2C1.1731576824939 2024-11-14T09:33:44,944 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39179:39179),(127.0.0.1/127.0.0.1:45069:45069)] 2024-11-14T09:33:44,944 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/WALs/test.com,8080,1/test.com%2C8080%2C1.1731576824933 is not closed yet, will try archiving it next time 2024-11-14T09:33:44,944 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:44,944 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:44,944 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:44,945 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:44,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741835_1011 (size=93) 2024-11-14T09:33:44,945 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:44,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741835_1011 (size=93) 2024-11-14T09:33:44,946 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/WALs/test.com,8080,1/test.com%2C8080%2C1.1731576824933 to hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/oldWALs/test.com%2C8080%2C1.1731576824933 2024-11-14T09:33:44,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741836_1012 (size=93) 2024-11-14T09:33:44,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741836_1012 (size=93) 2024-11-14T09:33:44,948 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/oldWALs 2024-11-14T09:33:44,948 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731576824939) 2024-11-14T09:33:44,948 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T09:33:44,948 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T09:33:44,948 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:33:44,948 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:33:44,948 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:33:44,949 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T09:33:44,949 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T09:33:44,949 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=62704458, stopped=false 2024-11-14T09:33:44,949 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=83f56b55f2af,37969,1731576824055 2024-11-14T09:33:44,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:33:44,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:33:44,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:44,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:44,950 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:33:44,950 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
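The NodeDeleted events on /hbase/running above are the ZooKeeper signal that cluster shutdown has been requested. A minimal sketch of watching a znode for deletion with the plain ZooKeeper client (the quorum address is a placeholder; the test above used 127.0.0.1:56788):

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningNodeWatchSketch {
        public static void main(String[] args) throws Exception {
            CountDownLatch deleted = new CountDownLatch(1);

            // Placeholder connect string and session timeout.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
                // React only to deletion of the watched znode.
                if (event.getType() == Watcher.Event.EventType.NodeDeleted
                        && "/hbase/running".equals(event.getPath())) {
                    deleted.countDown();
                }
            });

            // exists() with watch=true registers a one-shot watcher on the znode.
            zk.exists("/hbase/running", true);

            // Block until the cluster-up marker goes away, i.e. shutdown was requested.
            deleted.await();
            zk.close();
        }
    }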
2024-11-14T09:33:44,951 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:33:44,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:33:44,951 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '83f56b55f2af,35041,1731576824108' ***** 2024-11-14T09:33:44,951 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T09:33:44,951 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T09:33:44,951 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:33:44,951 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T09:33:44,951 INFO [RS:0;83f56b55f2af:35041 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T09:33:44,951 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:33:44,951 INFO [RS:0;83f56b55f2af:35041 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T09:33:44,951 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.HRegionServer(959): stopping server 83f56b55f2af,35041,1731576824108 2024-11-14T09:33:44,951 INFO [RS:0;83f56b55f2af:35041 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:33:44,951 INFO [RS:0;83f56b55f2af:35041 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;83f56b55f2af:35041. 2024-11-14T09:33:44,951 DEBUG [RS:0;83f56b55f2af:35041 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:33:44,951 DEBUG [RS:0;83f56b55f2af:35041 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:33:44,952 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T09:33:44,952 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T09:33:44,952 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-14T09:33:44,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,44811,1731576631688/83f56b55f2af%2C44811%2C1731576631688.meta.1731576632529.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:33:44,952 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T09:33:44,952 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-14T09:33:44,952 DEBUG [RS:0;83f56b55f2af:35041 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-14T09:33:44,952 DEBUG [RS:0;83f56b55f2af:35041 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T09:33:44,952 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:33:44,952 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:33:44,952 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:33:44,952 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:33:44,952 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:33:44,952 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-14T09:33:44,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36133/user/jenkins/test-data/57092ce0-877f-6de3-f852-40c06eacfd60/WALs/83f56b55f2af,41383,1731576632641/83f56b55f2af%2C41383%2C1731576632641.1731576632831 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:33:44,968 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/data/hbase/meta/1588230740/.tmp/ns/b4295615fae94c55aec8303114c3343a is 43, key is default/ns:d/1731576824882/Put/seqid=0 2024-11-14T09:33:44,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741837_1013 (size=5153) 2024-11-14T09:33:44,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741837_1013 (size=5153) 2024-11-14T09:33:44,972 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/data/hbase/meta/1588230740/.tmp/ns/b4295615fae94c55aec8303114c3343a 2024-11-14T09:33:44,977 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/data/hbase/meta/1588230740/.tmp/ns/b4295615fae94c55aec8303114c3343a as hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/data/hbase/meta/1588230740/ns/b4295615fae94c55aec8303114c3343a 2024-11-14T09:33:44,981 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/data/hbase/meta/1588230740/ns/b4295615fae94c55aec8303114c3343a, entries=2, sequenceid=6, filesize=5.0 K 2024-11-14T09:33:44,982 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false 2024-11-14T09:33:44,985 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-14T09:33:44,986 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:33:44,986 INFO [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:33:44,986 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731576824952Running coprocessor pre-close hooks at 1731576824952Disabling compacts and flushes for region at 1731576824952Disabling writes for close 
at 1731576824952Obtaining lock to block concurrent updates at 1731576824952Preparing flush snapshotting stores in 1588230740 at 1731576824952Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731576824953 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731576824953Flushing 1588230740/ns: creating writer at 1731576824953Flushing 1588230740/ns: appending metadata at 1731576824967 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731576824967Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@775ef7fa: reopening flushed file at 1731576824976 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false at 1731576824982 (+6 ms)Writing region close event to WAL at 1731576824983 (+1 ms)Running coprocessor post-close hooks at 1731576824986 (+3 ms)Closed at 1731576824986 2024-11-14T09:33:44,986 DEBUG [RS_CLOSE_META-regionserver/83f56b55f2af:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T09:33:45,152 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.HRegionServer(976): stopping server 83f56b55f2af,35041,1731576824108; all regions closed. 2024-11-14T09:33:45,153 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:45,153 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:45,153 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:45,153 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:45,153 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:45,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741834_1010 (size=1152) 2024-11-14T09:33:45,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741834_1010 (size=1152) 2024-11-14T09:33:45,157 DEBUG [RS:0;83f56b55f2af:35041 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/oldWALs 2024-11-14T09:33:45,157 INFO [RS:0;83f56b55f2af:35041 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 83f56b55f2af%2C35041%2C1731576824108.meta:.meta(num 1731576824840) 2024-11-14T09:33:45,158 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:45,158 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:45,158 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:45,158 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:45,158 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:45,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741833_1009 (size=93) 2024-11-14T09:33:45,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741833_1009 (size=93) 2024-11-14T09:33:45,161 DEBUG [RS:0;83f56b55f2af:35041 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/oldWALs 2024-11-14T09:33:45,161 INFO [RS:0;83f56b55f2af:35041 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 83f56b55f2af%2C35041%2C1731576824108:(num 1731576824483) 2024-11-14T09:33:45,161 
DEBUG [RS:0;83f56b55f2af:35041 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:33:45,161 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:33:45,161 INFO [RS:0;83f56b55f2af:35041 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:33:45,161 INFO [RS:0;83f56b55f2af:35041 {}] hbase.ChoreService(370): Chore service for: regionserver/83f56b55f2af:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T09:33:45,161 INFO [RS:0;83f56b55f2af:35041 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:33:45,162 INFO [regionserver/83f56b55f2af:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:33:45,162 INFO [RS:0;83f56b55f2af:35041 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35041 2024-11-14T09:33:45,164 INFO [RS:0;83f56b55f2af:35041 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:33:45,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/83f56b55f2af,35041,1731576824108 2024-11-14T09:33:45,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:33:45,165 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [83f56b55f2af,35041,1731576824108] 2024-11-14T09:33:45,167 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/83f56b55f2af,35041,1731576824108 already deleted, retry=false 2024-11-14T09:33:45,167 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 83f56b55f2af,35041,1731576824108 expired; onlineServers=0 2024-11-14T09:33:45,167 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '83f56b55f2af,37969,1731576824055' ***** 2024-11-14T09:33:45,167 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T09:33:45,167 INFO [M:0;83f56b55f2af:37969 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:33:45,167 INFO [M:0;83f56b55f2af:37969 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:33:45,168 DEBUG [M:0;83f56b55f2af:37969 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T09:33:45,168 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
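The two RecoverLeaseFSUtils WARN stacks above (InvocationTargetException caused by "Filesystem closed") come from a reflective method call made after the underlying filesystem client has been closed. A minimal, self-contained sketch of why the cause shows up wrapped, using a stand-in class rather than a real DistributedFileSystem:

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveInvokeSketch {
        // Stand-in for a filesystem client that rejects calls once closed.
        static class FakeFs {
            private boolean closed;
            public void close() { closed = true; }
            public boolean isFileClosed(String path) throws IOException {
                if (closed) {
                    throw new IOException("Filesystem closed");
                }
                return true;
            }
        }

        public static void main(String[] args) throws Exception {
            FakeFs fs = new FakeFs();
            fs.close();

            // Look up and invoke the method reflectively, the way the WARN stacks above show.
            Method m = FakeFs.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(fs, "/some/wal/file");
            } catch (InvocationTargetException e) {
                // Method.invoke wraps the underlying IOException, which is why the log
                // shows InvocationTargetException with "Filesystem closed" as the cause.
                System.out.println("wrapped cause: " + e.getCause());
            }
        }
    }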
2024-11-14T09:33:45,168 DEBUG [M:0;83f56b55f2af:37969 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T09:33:45,168 DEBUG [master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576824248 {}] cleaner.HFileCleaner(306): Exit Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.large.0-1731576824248,5,FailOnTimeoutGroup] 2024-11-14T09:33:45,168 DEBUG [master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576824249 {}] cleaner.HFileCleaner(306): Exit Thread[master/83f56b55f2af:0:becomeActiveMaster-HFileCleaner.small.0-1731576824249,5,FailOnTimeoutGroup] 2024-11-14T09:33:45,168 INFO [M:0;83f56b55f2af:37969 {}] hbase.ChoreService(370): Chore service for: master/83f56b55f2af:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T09:33:45,168 INFO [M:0;83f56b55f2af:37969 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:33:45,168 DEBUG [M:0;83f56b55f2af:37969 {}] master.HMaster(1795): Stopping service threads 2024-11-14T09:33:45,168 INFO [M:0;83f56b55f2af:37969 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T09:33:45,168 INFO [M:0;83f56b55f2af:37969 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:33:45,168 INFO [M:0;83f56b55f2af:37969 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T09:33:45,168 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T09:33:45,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T09:33:45,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:33:45,169 DEBUG [M:0;83f56b55f2af:37969 {}] zookeeper.ZKUtil(347): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T09:33:45,169 WARN [M:0;83f56b55f2af:37969 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T09:33:45,170 INFO [M:0;83f56b55f2af:37969 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/.lastflushedseqids 2024-11-14T09:33:45,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741838_1014 (size=99) 2024-11-14T09:33:45,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741838_1014 (size=99) 2024-11-14T09:33:45,175 INFO [M:0;83f56b55f2af:37969 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T09:33:45,175 INFO [M:0;83f56b55f2af:37969 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T09:33:45,175 DEBUG [M:0;83f56b55f2af:37969 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:33:45,175 INFO [M:0;83f56b55f2af:37969 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:33:45,175 DEBUG [M:0;83f56b55f2af:37969 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:33:45,175 DEBUG [M:0;83f56b55f2af:37969 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:33:45,175 DEBUG [M:0;83f56b55f2af:37969 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:33:45,175 INFO [M:0;83f56b55f2af:37969 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-14T09:33:45,190 DEBUG [M:0;83f56b55f2af:37969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/948338e619ce4e55a69f2e93780898b8 is 82, key is hbase:meta,,1/info:regioninfo/1731576824866/Put/seqid=0 2024-11-14T09:33:45,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741839_1015 (size=5672) 2024-11-14T09:33:45,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741839_1015 (size=5672) 2024-11-14T09:33:45,194 INFO [M:0;83f56b55f2af:37969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/948338e619ce4e55a69f2e93780898b8 2024-11-14T09:33:45,211 DEBUG [M:0;83f56b55f2af:37969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b5f18576bda0485ebdf550a03f9704cf is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731576824885/Put/seqid=0 2024-11-14T09:33:45,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741840_1016 (size=5275) 2024-11-14T09:33:45,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741840_1016 (size=5275) 2024-11-14T09:33:45,217 INFO [M:0;83f56b55f2af:37969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b5f18576bda0485ebdf550a03f9704cf 2024-11-14T09:33:45,235 DEBUG [M:0;83f56b55f2af:37969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1ed2a28396684ab1bf39cf404bfb345c is 69, key is 83f56b55f2af,35041,1731576824108/rs:state/1731576824342/Put/seqid=0 2024-11-14T09:33:45,239 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741841_1017 (size=5156) 2024-11-14T09:33:45,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741841_1017 (size=5156) 2024-11-14T09:33:45,240 INFO [M:0;83f56b55f2af:37969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1ed2a28396684ab1bf39cf404bfb345c 2024-11-14T09:33:45,257 DEBUG [M:0;83f56b55f2af:37969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/390800d59f5742bf990c427da8a1dcda is 52, key is load_balancer_on/state:d/1731576824930/Put/seqid=0 2024-11-14T09:33:45,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741842_1018 (size=5056) 2024-11-14T09:33:45,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741842_1018 (size=5056) 2024-11-14T09:33:45,262 INFO [M:0;83f56b55f2af:37969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/390800d59f5742bf990c427da8a1dcda 2024-11-14T09:33:45,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:33:45,265 INFO [RS:0;83f56b55f2af:35041 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:33:45,265 INFO [RS:0;83f56b55f2af:35041 {}] regionserver.HRegionServer(1031): Exiting; stopping=83f56b55f2af,35041,1731576824108; zookeeper connection closed. 
2024-11-14T09:33:45,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35041-0x10115d36b790001, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:33:45,265 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3bae1784 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3bae1784 2024-11-14T09:33:45,266 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T09:33:45,266 DEBUG [M:0;83f56b55f2af:37969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/948338e619ce4e55a69f2e93780898b8 as hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/948338e619ce4e55a69f2e93780898b8 2024-11-14T09:33:45,270 INFO [M:0;83f56b55f2af:37969 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/948338e619ce4e55a69f2e93780898b8, entries=8, sequenceid=29, filesize=5.5 K 2024-11-14T09:33:45,271 DEBUG [M:0;83f56b55f2af:37969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b5f18576bda0485ebdf550a03f9704cf as hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b5f18576bda0485ebdf550a03f9704cf 2024-11-14T09:33:45,274 INFO [M:0;83f56b55f2af:37969 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b5f18576bda0485ebdf550a03f9704cf, entries=3, sequenceid=29, filesize=5.2 K 2024-11-14T09:33:45,275 DEBUG [M:0;83f56b55f2af:37969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1ed2a28396684ab1bf39cf404bfb345c as hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1ed2a28396684ab1bf39cf404bfb345c 2024-11-14T09:33:45,278 INFO [M:0;83f56b55f2af:37969 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1ed2a28396684ab1bf39cf404bfb345c, entries=1, sequenceid=29, filesize=5.0 K 2024-11-14T09:33:45,279 DEBUG [M:0;83f56b55f2af:37969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/390800d59f5742bf990c427da8a1dcda as hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/390800d59f5742bf990c427da8a1dcda 2024-11-14T09:33:45,282 INFO 
[M:0;83f56b55f2af:37969 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45383/user/jenkins/test-data/8d2230b7-a540-cbf9-31dd-c5c0c375f6d6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/390800d59f5742bf990c427da8a1dcda, entries=1, sequenceid=29, filesize=4.9 K 2024-11-14T09:33:45,283 INFO [M:0;83f56b55f2af:37969 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=29, compaction requested=false 2024-11-14T09:33:45,285 INFO [M:0;83f56b55f2af:37969 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:33:45,285 DEBUG [M:0;83f56b55f2af:37969 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731576825175
Disabling compacts and flushes for region at 1731576825175
Disabling writes for close at 1731576825175
Obtaining lock to block concurrent updates at 1731576825175
Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731576825175
Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731576825175
Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731576825176 (+1 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731576825176
Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731576825189 (+13 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731576825189
Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731576825198 (+9 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731576825211 (+13 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731576825211
Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731576825221 (+10 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731576825234 (+13 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731576825234
Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731576825243 (+9 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731576825257 (+14 ms)
Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731576825257
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44aca1ce: reopening flushed file at 1731576825266 (+9 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d355347: reopening flushed file at 1731576825270 (+4 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37d249f7: reopening flushed file at 1731576825274 (+4 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23d52e78: reopening flushed file at 1731576825279 (+5 ms)
Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=29, compaction requested=false at 1731576825283 (+4 ms)
Writing region close event to WAL at 1731576825285 (+2 ms)
Closed at 1731576825285
2024-11-14T09:33:45,286 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:45,286 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:45,286 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:45,286 INFO
[sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:45,286 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:33:45,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36151 is added to blk_1073741830_1006 (size=10311) 2024-11-14T09:33:45,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46029 is added to blk_1073741830_1006 (size=10311) 2024-11-14T09:33:45,288 INFO [M:0;83f56b55f2af:37969 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T09:33:45,289 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:33:45,289 INFO [M:0;83f56b55f2af:37969 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37969 2024-11-14T09:33:45,289 INFO [M:0;83f56b55f2af:37969 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:33:45,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:33:45,391 INFO [M:0;83f56b55f2af:37969 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:33:45,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10115d36b790000, quorum=127.0.0.1:56788, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:33:45,393 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26b7b1af{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:33:45,394 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64daf748{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:33:45,394 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:33:45,394 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69ffafd5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:33:45,394 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@694aa09b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/hadoop.log.dir/,STOPPED} 2024-11-14T09:33:45,395 WARN [BP-1950380783-172.17.0.2-1731576823300 heartbeating to localhost/127.0.0.1:45383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:33:45,395 WARN [BP-1950380783-172.17.0.2-1731576823300 heartbeating to localhost/127.0.0.1:45383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1950380783-172.17.0.2-1731576823300 (Datanode Uuid 291f43d3-6b29-47c3-baee-024dac255524) service to localhost/127.0.0.1:45383 2024-11-14T09:33:45,395 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:33:45,395 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:33:45,396 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/cluster_d45ae119-9bce-d3ef-f172-614ffa505192/data/data3/current/BP-1950380783-172.17.0.2-1731576823300 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:33:45,396 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/cluster_d45ae119-9bce-d3ef-f172-614ffa505192/data/data4/current/BP-1950380783-172.17.0.2-1731576823300 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:33:45,396 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:33:45,398 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@67e3c4de{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:33:45,398 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e22b5d8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:33:45,398 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:33:45,398 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44209567{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:33:45,398 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10993583{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/hadoop.log.dir/,STOPPED} 2024-11-14T09:33:45,399 WARN [BP-1950380783-172.17.0.2-1731576823300 heartbeating to localhost/127.0.0.1:45383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:33:45,399 WARN [BP-1950380783-172.17.0.2-1731576823300 heartbeating to localhost/127.0.0.1:45383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1950380783-172.17.0.2-1731576823300 (Datanode Uuid d6c1c73d-9a80-4ea9-9e58-bf836d65eaaf) service to localhost/127.0.0.1:45383 2024-11-14T09:33:45,399 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:33:45,399 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:33:45,400 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/cluster_d45ae119-9bce-d3ef-f172-614ffa505192/data/data1/current/BP-1950380783-172.17.0.2-1731576823300 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:33:45,400 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/cluster_d45ae119-9bce-d3ef-f172-614ffa505192/data/data2/current/BP-1950380783-172.17.0.2-1731576823300 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:33:45,400 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:33:45,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6ce65df0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:33:45,406 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5859719{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:33:45,406 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:33:45,406 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@622c554c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:33:45,406 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b0b2b30{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/269359ac-9a66-a599-705f-239acd5fbc7d/hadoop.log.dir/,STOPPED} 2024-11-14T09:33:45,414 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T09:33:45,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T09:33:45,436 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 230) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:45383 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:45383 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-22 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1709469494) connection to localhost/127.0.0.1:45383 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:45383 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45383 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=537 (was 512) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=27 (was 27), ProcessCount=11 (was 11), AvailableMemoryMB=6461 (was 6469)